8255237: ZGC: Bulk free garbage pages during relocation set selection

Co-authored-by: Albert Mingkun Yang <ayang@openjdk.org>
Co-authored-by: Per Liden <pliden@openjdk.org>
Reviewed-by: ayang, eosterlund
Author: Per Liden, 2020-11-02 17:00:34 +00:00
Commit: 3e89f729af (parent: 6dac8d2780)
7 changed files with 120 additions and 52 deletions
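Before this change, select_relocation_set() freed each garbage page it found individually, taking the page allocator lock and trying to satisfy stalled allocations once per page. After it, garbage pages are queued on the relocation set selector and freed in batches of 64. A rough count of lock acquisitions under the two schemes (illustrative arithmetic only, not measured data; the page count is made up):

#include <cstdio>

int main() {
  const int pages  = 10000;                      // hypothetical garbage pages in one scan
  const int bulk   = 64;                         // batch size used by ZHeap below
  const int before = pages;                      // old: one lock acquire per page
  const int after  = (pages + bulk - 1) / bulk;  // new: one per batch, incl. final drain
  std::printf("lock acquires before: %d, after: %d\n", before, after);
  return 0;
}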

src/hotspot/share/gc/z/zHeap.cpp

@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/locationPrinter.hpp"
 #include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zHeapIterator.hpp"
@@ -220,6 +221,17 @@ void ZHeap::free_page(ZPage* page, bool reclaimed) {
   _page_allocator.free_page(page, reclaimed);
 }
 
+void ZHeap::free_pages(const ZArray<ZPage*>* pages, bool reclaimed) {
+  // Remove page table entries
+  ZArrayIterator<ZPage*> iter(pages);
+  for (ZPage* page; iter.next(&page);) {
+    _page_table.remove(page);
+  }
+
+  // Free pages
+  _page_allocator.free_pages(pages, reclaimed);
+}
+
 void ZHeap::flip_to_marked() {
   ZVerifyViewsFlip flip(&_page_allocator);
   ZAddress::flip_to_marked();
@@ -349,6 +361,16 @@ void ZHeap::process_non_strong_references() {
   _reference_processor.enqueue_references();
 }
 
+void ZHeap::free_garbage_pages(ZRelocationSetSelector* selector, int bulk) {
+  // Freeing garbage pages in bulk is an optimization to avoid grabbing
+  // the page allocator lock, and trying to satisfy stalled allocations
+  // too frequently.
+  if (selector->should_free_garbage_pages(bulk)) {
+    free_pages(selector->garbage_pages(), true /* reclaimed */);
+    selector->clear_garbage_pages();
+  }
+}
+
 void ZHeap::select_relocation_set() {
   // Do not allow pages to be deleted
   _page_allocator.enable_deferred_delete();
@@ -369,11 +391,14 @@ void ZHeap::select_relocation_set() {
     } else {
       // Register garbage page
       selector.register_garbage_page(page);
 
-      // Reclaim page immediately
-      free_page(page, true /* reclaimed */);
+      // Reclaim garbage pages in bulk
+      free_garbage_pages(&selector, 64 /* bulk */);
     }
   }
 
+  // Reclaim remaining garbage pages
+  free_garbage_pages(&selector, 0 /* bulk */);
+
   // Allow pages to be deleted
   _page_allocator.disable_deferred_delete();
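The hunks above establish the protocol: during the page-table scan every garbage page is queued, free_garbage_pages() flushes the queue whenever it reaches 64 entries, and the final call with bulk = 0 drains whatever is left. A minimal standalone sketch of that accumulate/flush pattern (plain C++ with std::vector standing in for ZArray; illustrative names, not the real ZGC types):

#include <vector>

struct Page {};

struct Selector {
  std::vector<Page*> garbage;  // stands in for _garbage_pages
  bool should_free(int bulk) const {
    return (int)garbage.size() >= bulk && !garbage.empty();
  }
};

// Stands in for ZHeap::free_pages(): one allocator-lock acquire per batch.
static void free_pages_bulk(const std::vector<Page*>& pages) {
  for (Page* page : pages) {
    delete page;
  }
}

static void free_garbage_pages(Selector* selector, int bulk) {
  if (selector->should_free(bulk)) {
    free_pages_bulk(selector->garbage);
    selector->garbage.clear();
  }
}

int main() {
  Selector selector;
  for (int i = 0; i < 1000; i++) {           // stands in for the page-table scan
    selector.garbage.push_back(new Page());  // register_garbage_page()
    free_garbage_pages(&selector, 64);       // flushes only once 64 pages are queued
  }
  free_garbage_pages(&selector, 0);          // drains the remainder after the scan
  return 0;
}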

src/hotspot/share/gc/z/zHeap.hpp

@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZHEAP_HPP
 
 #include "gc/z/zAllocationFlags.hpp"
+#include "gc/z/zArray.hpp"
 #include "gc/z/zForwardingTable.hpp"
 #include "gc/z/zMark.hpp"
 #include "gc/z/zObjectAllocator.hpp"
@@ -40,6 +41,7 @@
 class ThreadClosure;
 class ZPage;
+class ZRelocationSetSelector;
 
 class ZHeap {
   friend class VMStructs;
 
@@ -63,6 +65,8 @@ private:
   void flip_to_marked();
   void flip_to_remapped();
 
+  void free_garbage_pages(ZRelocationSetSelector* selector, int bulk);
+
   void out_of_memory();
 
 public:
@@ -110,6 +114,7 @@ public:
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);
+  void free_pages(const ZArray<ZPage*>* pages, bool reclaimed);
 
   // Object allocation
   uintptr_t alloc_tlab(size_t size);

src/hotspot/share/gc/z/zPageAllocator.cpp

@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/gcLogPrecious.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zFuture.inline.hpp"
 #include "gc/z/zGlobals.hpp"
@@ -748,6 +749,19 @@ void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
   satisfy_stalled();
 }
 
+void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages, bool reclaimed) {
+  ZLocker<ZLock> locker(&_lock);
+
+  // Free pages
+  ZArrayIterator<ZPage*> iter(pages);
+  for (ZPage* page; iter.next(&page);) {
+    free_page_inner(page, reclaimed);
+  }
+
+  // Try satisfy stalled allocations
+  satisfy_stalled();
+}
+
 size_t ZPageAllocator::uncommit(uint64_t* timeout) {
   // We need to join the suspendible thread set while manipulating capacity and
   // used, to make sure GC safepoints will have a consistent view. However, when
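The new free_pages() takes the allocator lock once and calls satisfy_stalled() once for the whole batch, where the per-page path does both for every page. A stand-in sketch contrasting the two lock shapes (std::mutex in place of ZLock; hypothetical names, not the real ZPageAllocator):

#include <mutex>
#include <vector>

struct Page {};

class PageAllocatorSketch {
  std::mutex _lock;

  void free_page_inner(Page* page) { delete page; }
  void satisfy_stalled() { /* wake threads stalled on allocation */ }

public:
  // Old shape: lock + satisfy_stalled() once per page.
  void free_page(Page* page) {
    std::lock_guard<std::mutex> locker(_lock);
    free_page_inner(page);
    satisfy_stalled();
  }

  // New shape: lock + satisfy_stalled() once per batch.
  void free_pages(const std::vector<Page*>& pages) {
    std::lock_guard<std::mutex> locker(_lock);
    for (Page* page : pages) {
      free_page_inner(page);
    }
    satisfy_stalled();
  }
};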

src/hotspot/share/gc/z/zPageAllocator.hpp

@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZPAGEALLOCATOR_HPP
 
 #include "gc/z/zAllocationFlags.hpp"
+#include "gc/z/zArray.hpp"
 #include "gc/z/zList.hpp"
 #include "gc/z/zLock.hpp"
 #include "gc/z/zPageCache.hpp"
@@ -124,6 +125,7 @@ public:
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void free_page(ZPage* page, bool reclaimed);
+  void free_pages(const ZArray<ZPage*>* pages, bool reclaimed);
 
   void enable_deferred_delete() const;
   void disable_deferred_delete() const;

src/hotspot/share/gc/z/zRelocationSetSelector.cpp

@@ -54,31 +54,6 @@ ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name,
     _forwarding_entries(0),
     _stats() {}
 
-void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
-  const uint8_t type = page->type();
-  const size_t size = page->size();
-  const size_t live = page->live_bytes();
-  const size_t garbage = size - live;
-
-  if (garbage > _fragmentation_limit) {
-    _registered_pages.append(page);
-  }
-
-  _stats._npages++;
-  _stats._total += size;
-  _stats._live += live;
-  _stats._garbage += garbage;
-}
-
-void ZRelocationSetSelectorGroup::register_garbage_page(ZPage* page) {
-  const size_t size = page->size();
-
-  _stats._npages++;
-  _stats._total += size;
-  _stats._garbage += size;
-  _stats._empty += size;
-}
-
 bool ZRelocationSetSelectorGroup::is_disabled() {
   // Medium pages are disabled when their page size is zero
   return _page_type == ZPageTypeMedium && _page_size == 0;
@@ -205,31 +180,8 @@ void ZRelocationSetSelectorGroup::select() {
 ZRelocationSetSelector::ZRelocationSetSelector() :
     _small("Small", ZPageTypeSmall, ZPageSizeSmall, ZObjectSizeLimitSmall),
     _medium("Medium", ZPageTypeMedium, ZPageSizeMedium, ZObjectSizeLimitMedium),
-    _large("Large", ZPageTypeLarge, 0 /* page_size */, 0 /* object_size_limit */) {}
-
-void ZRelocationSetSelector::register_live_page(ZPage* page) {
-  const uint8_t type = page->type();
-
-  if (type == ZPageTypeSmall) {
-    _small.register_live_page(page);
-  } else if (type == ZPageTypeMedium) {
-    _medium.register_live_page(page);
-  } else {
-    _large.register_live_page(page);
-  }
-}
-
-void ZRelocationSetSelector::register_garbage_page(ZPage* page) {
-  const uint8_t type = page->type();
-
-  if (type == ZPageTypeSmall) {
-    _small.register_garbage_page(page);
-  } else if (type == ZPageTypeMedium) {
-    _medium.register_garbage_page(page);
-  } else {
-    _large.register_garbage_page(page);
-  }
-}
+    _large("Large", ZPageTypeLarge, 0 /* page_size */, 0 /* object_size_limit */),
+    _garbage_pages() {}
 
 void ZRelocationSetSelector::select() {
   // Select pages to relocate. The resulting relocation set will be
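Note that the two register functions don't just gain garbage-page bookkeeping: their definitions move out of this .cpp and into zRelocationSetSelector.inline.hpp (last file below), so the per-page calls in the selection loop can be inlined now that they sit on a hotter path. A toy illustration of that header-inline pattern (hypothetical names, not the real selector):

// As it would appear in a .inline.hpp: the definition is visible at every
// call site, so the compiler can inline it in the scan loop.
struct StatsSketch { unsigned long npages = 0; unsigned long garbage = 0; };

inline void register_garbage_sketch(StatsSketch* stats, unsigned long page_size) {
  stats->npages++;              // same bookkeeping as an out-of-line version
  stats->garbage += page_size;
}

int main() {
  StatsSketch stats;
  for (int i = 0; i < 100; i++) {
    register_garbage_sketch(&stats, 2 * 1024 * 1024);  // inlinable per-page call
  }
  return stats.npages == 100 ? 0 : 1;
}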

src/hotspot/share/gc/z/zRelocationSetSelector.hpp

@@ -104,6 +104,7 @@ private:
   ZRelocationSetSelectorGroup _small;
   ZRelocationSetSelectorGroup _medium;
   ZRelocationSetSelectorGroup _large;
+  ZArray<ZPage*> _garbage_pages;
 
   size_t total() const;
   size_t empty() const;
@@ -116,6 +117,10 @@ public:
   void register_live_page(ZPage* page);
   void register_garbage_page(ZPage* page);
 
+  bool should_free_garbage_pages(int bulk) const;
+  const ZArray<ZPage*>* garbage_pages() const;
+  void clear_garbage_pages();
+
   void select();
 
   const ZArray<ZPage*>* small() const;

src/hotspot/share/gc/z/zRelocationSetSelector.inline.hpp

@@ -24,6 +24,8 @@
 #ifndef SHARE_GC_Z_ZRELOCATIONSETSELECTOR_INLINE_HPP
 #define SHARE_GC_Z_ZRELOCATIONSETSELECTOR_INLINE_HPP
 
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
 #include "gc/z/zRelocationSetSelector.hpp"
 
 inline size_t ZRelocationSetSelectorGroupStats::npages() const {
@@ -66,6 +68,31 @@ inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::larg
   return _large;
 }
 
+inline void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
+  const uint8_t type = page->type();
+  const size_t size = page->size();
+  const size_t live = page->live_bytes();
+  const size_t garbage = size - live;
+
+  if (garbage > _fragmentation_limit) {
+    _registered_pages.append(page);
+  }
+
+  _stats._npages++;
+  _stats._total += size;
+  _stats._live += live;
+  _stats._garbage += garbage;
+}
+
+inline void ZRelocationSetSelectorGroup::register_garbage_page(ZPage* page) {
+  const size_t size = page->size();
+
+  _stats._npages++;
+  _stats._total += size;
+  _stats._garbage += size;
+  _stats._empty += size;
+}
+
 inline const ZArray<ZPage*>* ZRelocationSetSelectorGroup::selected() const {
   return &_registered_pages;
 }
@@ -78,6 +105,44 @@ inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorGroup::stat
   return _stats;
 }
 
+inline void ZRelocationSetSelector::register_live_page(ZPage* page) {
+  const uint8_t type = page->type();
+
+  if (type == ZPageTypeSmall) {
+    _small.register_live_page(page);
+  } else if (type == ZPageTypeMedium) {
+    _medium.register_live_page(page);
+  } else {
+    _large.register_live_page(page);
+  }
+}
+
+inline void ZRelocationSetSelector::register_garbage_page(ZPage* page) {
+  const uint8_t type = page->type();
+
+  if (type == ZPageTypeSmall) {
+    _small.register_garbage_page(page);
+  } else if (type == ZPageTypeMedium) {
+    _medium.register_garbage_page(page);
+  } else {
+    _large.register_garbage_page(page);
+  }
+
+  _garbage_pages.append(page);
+}
+
+inline bool ZRelocationSetSelector::should_free_garbage_pages(int bulk) const {
+  return _garbage_pages.length() >= bulk && _garbage_pages.is_nonempty();
+}
+
+inline const ZArray<ZPage*>* ZRelocationSetSelector::garbage_pages() const {
+  return &_garbage_pages;
+}
+
+inline void ZRelocationSetSelector::clear_garbage_pages() {
+  return _garbage_pages.clear();
+}
+
 inline size_t ZRelocationSetSelector::total() const {
   return _small.stats().total() + _medium.stats().total() + _large.stats().total();
 }
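One subtlety in should_free_garbage_pages(): with bulk == 0 the length() >= bulk test always holds, so is_nonempty() alone decides the final drain, and the page allocator lock is never taken for an empty batch. A self-contained check of that truth table (assumed-equivalent logic in plain C++, with is_nonempty() modeled as length > 0):

#include <cassert>

// Mirrors the predicate above: length >= bulk && length > 0.
static bool should_free(int length, int bulk) {
  return length >= bulk && length > 0;
}

int main() {
  assert(!should_free(63, 64)); // mid-scan: below the threshold, keep queueing
  assert(should_free(64, 64));  // mid-scan: batch full, free in bulk
  assert(should_free(17, 0));   // final drain: frees whatever remains
  assert(!should_free(0, 0));   // final drain: empty queue, skip the lock
  return 0;
}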