8256390: ZGC: Relocate in-place instead of having a heap reserve
Reviewed-by: stefank, eosterlund
parent 1df94c9fff
commit 372595cca3
src/hotspot/share/gc/z/zAllocationFlags.hpp
@@ -31,31 +31,25 @@
 // Allocation flags layout
 // -----------------------
 //
-//   7   4 3 2 1 0
-// +---+-+-+-+-+-+
-// |000|1|1|1|1|1|
-// +---+-+-+-+-+-+
-//  |   | | | | |
-//  |   | | | | * 0-0 Worker Thread Flag (1-bit)
-//  |   | | | |
-//  |   | | | * 1-1 Non-Blocking Flag (1-bit)
-//  |   | | |
-//  |   | | * 2-2 Relocation Flag (1-bit)
-//  |   | |
-//  |   | * 3-3 No Reserve Flag (1-bit)
-//  |   |
-//  |   * 4-4 Low Address Flag (1-bit)
+//     7  2 1 0
+// +-----+-+-+-+
+// |00000|1|1|1|
+// +-----+-+-+-+
+//    |   | | |
+//    |   | | * 0-0 Non-Blocking Flag (1-bit)
+//    |   | |
+//    |   | * 1-1 Worker Relocation Flag (1-bit)
+//    |   |
+//    |   * 2-2 Low Address Flag (1-bit)
 //  |
-//  * 7-5 Unused (3-bits)
+//  * 7-3 Unused (5-bits)
 //

 class ZAllocationFlags {
 private:
-  typedef ZBitField<uint8_t, bool, 0, 1> field_worker_thread;
-  typedef ZBitField<uint8_t, bool, 1, 1> field_non_blocking;
-  typedef ZBitField<uint8_t, bool, 2, 1> field_relocation;
-  typedef ZBitField<uint8_t, bool, 3, 1> field_no_reserve;
-  typedef ZBitField<uint8_t, bool, 4, 1> field_low_address;
+  typedef ZBitField<uint8_t, bool, 0, 1> field_non_blocking;
+  typedef ZBitField<uint8_t, bool, 1, 1> field_worker_relocation;
+  typedef ZBitField<uint8_t, bool, 2, 1> field_low_address;

   uint8_t _flags;

@@ -63,40 +57,24 @@ public:
   ZAllocationFlags() :
       _flags(0) {}

-  void set_worker_thread() {
-    _flags |= field_worker_thread::encode(true);
-  }
-
   void set_non_blocking() {
     _flags |= field_non_blocking::encode(true);
   }

-  void set_relocation() {
-    _flags |= field_relocation::encode(true);
-  }
-
-  void set_no_reserve() {
-    _flags |= field_no_reserve::encode(true);
+  void set_worker_relocation() {
+    _flags |= field_worker_relocation::encode(true);
   }

   void set_low_address() {
     _flags |= field_low_address::encode(true);
   }

-  bool worker_thread() const {
-    return field_worker_thread::decode(_flags);
-  }
-
   bool non_blocking() const {
     return field_non_blocking::decode(_flags);
   }

-  bool relocation() const {
-    return field_relocation::decode(_flags);
-  }
-
-  bool no_reserve() const {
-    return field_no_reserve::decode(_flags);
+  bool worker_relocation() const {
+    return field_worker_relocation::decode(_flags);
   }

   bool low_address() const {
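An aside on the layout above: ZBitField packs each flag into a fixed bit position of the single _flags byte. A minimal standalone sketch of the encode/decode idea (not HotSpot's actual ZBitField template, which also supports multi-bit fields):

    #include <cstdint>

    // Stand-in for ZBitField<uint8_t, bool, Shift, 1>: one flag per bit.
    template <int Shift>
    struct BoolField {
      static uint8_t encode(bool value) { return static_cast<uint8_t>(value ? 1 : 0) << Shift; }
      static bool decode(uint8_t flags) { return ((flags >> Shift) & 1) != 0; }
    };

    using field_non_blocking      = BoolField<0>;
    using field_worker_relocation = BoolField<1>;
    using field_low_address       = BoolField<2>;

    int main() {
      uint8_t flags = 0;
      flags |= field_non_blocking::encode(true);
      flags |= field_worker_relocation::encode(true);
      return field_worker_relocation::decode(flags) ? 0 : 1;  // decodes to true
    }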
src/hotspot/share/gc/z/zArguments.cpp
@@ -71,18 +71,6 @@ void ZArguments::initialize() {
     vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
   }

-  // Select medium page size so that we can calculate the max reserve
-  ZHeuristics::set_medium_page_size();
-
-  // MinHeapSize/InitialHeapSize must be at least as large as the max reserve
-  const size_t max_reserve = ZHeuristics::max_reserve();
-  if (MinHeapSize < max_reserve) {
-    FLAG_SET_ERGO(MinHeapSize, max_reserve);
-  }
-  if (InitialHeapSize < max_reserve) {
-    FLAG_SET_ERGO(InitialHeapSize, max_reserve);
-  }
-
 #ifdef COMPILER2
   // Enable loop strip mining by default
   if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
src/hotspot/share/gc/z/zDirector.cpp
@@ -25,12 +25,14 @@
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zDirector.hpp"
 #include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zHeuristics.hpp"
 #include "gc/z/zStat.hpp"
 #include "logging/log.hpp"

 const double ZDirector::one_in_1000 = 3.290527;

 ZDirector::ZDirector() :
+    _relocation_headroom(ZHeuristics::relocation_headroom()),
     _metronome(ZStatAllocRate::sample_hz) {
   set_name("ZDirector");
   create_and_start();
@@ -95,14 +97,12 @@ bool ZDirector::rule_allocation_rate() const {
   // margin based on variations in the allocation rate and unforeseen
   // allocation spikes.

-  // Calculate amount of free memory available to Java threads. Note that
-  // the heap reserve is not available to Java threads and is therefore not
-  // considered part of the free memory.
+  // Calculate amount of free memory available. Note that we take the
+  // relocation headroom into account to avoid in-place relocation.
   const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
-  const size_t max_reserve = ZHeap::heap()->max_reserve();
   const size_t used = ZHeap::heap()->used();
-  const size_t free_with_reserve = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
+  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
+  const size_t free = free_including_headroom - MIN2(free_including_headroom, _relocation_headroom);

   // Calculate time until OOM given the max allocation rate and the amount
   // of free memory. The allocation rate is a moving average and we multiply
@@ -179,14 +179,12 @@ bool ZDirector::rule_high_usage() const {
   // memory is still slowly but surely heading towards zero. In this situation,
   // we start a GC cycle to avoid a potential allocation stall later.

-  // Calculate amount of free memory available to Java threads. Note that
-  // the heap reserve is not available to Java threads and is therefore not
-  // considered part of the free memory.
+  // Calculate amount of free memory available. Note that we take the
+  // relocation headroom into account to avoid in-place relocation.
   const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
-  const size_t max_reserve = ZHeap::heap()->max_reserve();
   const size_t used = ZHeap::heap()->used();
-  const size_t free_with_reserve = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve);
+  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
+  const size_t free = free_including_headroom - MIN2(free_including_headroom, _relocation_headroom);
   const double free_percent = percent_of(free, soft_max_capacity);

   log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)",
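Both rules now compute free memory identically. A standalone sketch of the arithmetic, with hypothetical numbers (the real inputs come from ZHeap and ZHeuristics):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // New calculation: clamp used to the soft max capacity, then carve out
    // the relocation headroom instead of a dedicated heap reserve.
    static size_t free_for_allocation(size_t soft_max, size_t used, size_t headroom) {
      const size_t free_including_headroom = soft_max - std::min(soft_max, used);
      return free_including_headroom - std::min(free_including_headroom, headroom);
    }

    int main() {
      const size_t M = 1024 * 1024;
      // Hypothetical: 1024M soft max, 900M used, 72M headroom -> 52M free.
      std::printf("%zuM\n", free_for_allocation(1024 * M, 900 * M, 72 * M) / M);
      return 0;
    }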
src/hotspot/share/gc/z/zDirector.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,8 @@ class ZDirector : public ConcurrentGCThread {
 private:
   static const double one_in_1000;

-  ZMetronome _metronome;
+  const size_t _relocation_headroom;
+  ZMetronome   _metronome;

   void sample_allocation_rate() const;
src/hotspot/share/gc/z/zForwarding.cpp
@@ -22,14 +22,144 @@
  */

 #include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zForwarding.inline.hpp"
+#include "gc/z/zStat.hpp"
+#include "gc/z/zUtils.inline.hpp"
+#include "utilities/align.hpp"
 #include "utilities/debug.hpp"

+//
+// Reference count states:
+//
+// * If the reference count is zero, it will never change again.
+//
+// * If the reference count is positive, it can be both retained
+//   (increased) and released (decreased).
+//
+// * If the reference count is negative, it can only be released
+//   (increased). A negative reference count means that one or more
+//   threads are waiting for one or more other threads to release
+//   their references.
+//
+// The reference lock is used for waiting until the reference
+// count has become zero (released) or negative one (claimed).
+//
+
+static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
+
+bool ZForwarding::retain_page() {
+  for (;;) {
+    const int32_t ref_count = Atomic::load_acquire(&_ref_count);
+
+    if (ref_count == 0) {
+      // Released
+      return false;
+    }
+
+    if (ref_count < 0) {
+      // Claimed
+      wait_page_released();
+      return false;
+    }
+
+    if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) {
+      // Retained
+      return true;
+    }
+  }
+}
+
+ZPage* ZForwarding::claim_page() {
+  for (;;) {
+    const int32_t ref_count = Atomic::load(&_ref_count);
+    assert(ref_count > 0, "Invalid state");
+
+    // Invert reference count
+    if (Atomic::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) {
+      continue;
+    }
+
+    // If the previous reference count was 1, then we just changed it to -1,
+    // and we have now claimed the page. Otherwise we wait until it is claimed.
+    if (ref_count != 1) {
+      ZLocker<ZConditionLock> locker(&_ref_lock);
+      while (Atomic::load_acquire(&_ref_count) != -1) {
+        _ref_lock.wait();
+      }
+    }
+
+    return _page;
+  }
+}
+
+void ZForwarding::release_page() {
+  for (;;) {
+    const int32_t ref_count = Atomic::load(&_ref_count);
+    assert(ref_count != 0, "Invalid state");
+
+    if (ref_count > 0) {
+      // Decrement reference count
+      if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) {
+        continue;
+      }
+
+      // If the previous reference count was 1, then we just decremented
+      // it to 0 and we should signal that the page is now released.
+      if (ref_count == 1) {
+        // Notify released
+        ZLocker<ZConditionLock> locker(&_ref_lock);
+        _ref_lock.notify_all();
+      }
+    } else {
+      // Increment reference count
+      if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) {
+        continue;
+      }
+
+      // If the previous reference count was -2 or -1, then we just incremented it
+      // to -1 or 0, and we should signal that the page is now claimed or released.
+      if (ref_count == -2 || ref_count == -1) {
+        // Notify claimed or released
+        ZLocker<ZConditionLock> locker(&_ref_lock);
+        _ref_lock.notify_all();
+      }
+    }
+
+    return;
+  }
+}
+
+void ZForwarding::wait_page_released() const {
+  if (Atomic::load_acquire(&_ref_count) != 0) {
+    ZStatTimer timer(ZCriticalPhaseRelocationStall);
+    ZLocker<ZConditionLock> locker(&_ref_lock);
+    while (Atomic::load_acquire(&_ref_count) != 0) {
+      _ref_lock.wait();
+    }
+  }
+}
+
+ZPage* ZForwarding::detach_page() {
+  // Wait until released
+  if (Atomic::load_acquire(&_ref_count) != 0) {
+    ZLocker<ZConditionLock> locker(&_ref_lock);
+    while (Atomic::load_acquire(&_ref_count) != 0) {
+      _ref_lock.wait();
+    }
+  }
+
+  // Detach and return page
+  ZPage* const page = _page;
+  _page = NULL;
+  return page;
+}
+
 void ZForwarding::verify() const {
-  guarantee(_refcount > 0, "Invalid refcount");
+  guarantee(_ref_count != 0, "Invalid reference count");
   guarantee(_page != NULL, "Invalid page");

-  size_t live_objects = 0;
+  uint32_t live_objects = 0;
+  size_t live_bytes = 0;

   for (ZForwardingCursor i = 0; i < _entries.length(); i++) {
     const ZForwardingEntry entry = at(&i);
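The protocol above can be modeled outside HotSpot with standard C++ atomics. The following simplified sketch mirrors the state machine (positive = retainable, zero = released forever, negative = claimed) but substitutes std::mutex/std::condition_variable for ZGC's ZConditionLock; it is illustrative only:

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class RefCountModel {
      std::atomic<int32_t> _ref_count{1};
      mutable std::mutex _lock;
      mutable std::condition_variable _cv;

    public:
      bool retain() {
        int32_t n = _ref_count.load();
        while (n > 0) {
          if (_ref_count.compare_exchange_weak(n, n + 1)) {
            return true;   // Retained
          }
        }
        return false;      // Released (0) or claimed (< 0)
      }

      void release() {
        for (;;) {
          int32_t n = _ref_count.load();
          const int32_t next = (n > 0) ? n - 1 : n + 1;  // always move towards 0
          if (!_ref_count.compare_exchange_weak(n, next)) {
            continue;
          }
          if (next == 0 || next == -1) {
            // Page is now released (0), or a pending claim completed (-1)
            std::lock_guard<std::mutex> guard(_lock);
            _cv.notify_all();
          }
          return;
        }
      }

      void claim() {
        // Invert the (positive) count; -1 means "claimed, no other references"
        int32_t n = _ref_count.load();
        while (!_ref_count.compare_exchange_weak(n, -n)) {}
        std::unique_lock<std::mutex> guard(_lock);
        _cv.wait(guard, [&] { return _ref_count.load() == -1; });
      }
    };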
@@ -53,9 +183,13 @@ void ZForwarding::verify() const {
       guarantee(entry.to_offset() != other.to_offset(), "Duplicate to");
     }

+    const uintptr_t to_addr = ZAddress::good(entry.to_offset());
+    const size_t size = ZUtils::object_size(to_addr);
+    const size_t aligned_size = align_up(size, _page->object_alignment());
+    live_bytes += aligned_size;
     live_objects++;
   }

-  // Check number of non-empty entries
-  guarantee(live_objects == _page->live_objects(), "Invalid number of entries");
+  // Verify number of live objects and bytes
+  _page->verify_live(live_objects, live_bytes);
 }
src/hotspot/share/gc/z/zForwarding.hpp
@@ -26,8 +26,10 @@

 #include "gc/z/zAttachedArray.hpp"
 #include "gc/z/zForwardingEntry.hpp"
+#include "gc/z/zLock.hpp"
 #include "gc/z/zVirtualMemory.hpp"

+class ObjectClosure;
 class ZForwardingAllocator;
 class ZPage;

@@ -40,15 +42,13 @@ class ZForwarding {
 private:
   typedef ZAttachedArray<ZForwarding, ZForwardingEntry> AttachedArray;

-  const ZVirtualMemory _virtual;
-  const size_t _object_alignment_shift;
-  const AttachedArray _entries;
-  ZPage* _page;
-  volatile uint32_t _refcount;
-  volatile bool _pinned;
-
-  bool inc_refcount();
-  bool dec_refcount();
+  const ZVirtualMemory   _virtual;
+  const size_t           _object_alignment_shift;
+  const AttachedArray    _entries;
+  ZPage*                 _page;
+  mutable ZConditionLock _ref_lock;
+  volatile int32_t       _ref_count;
+  bool                   _in_place;

   ZForwardingEntry* entries() const;
   ZForwardingEntry at(ZForwardingCursor* cursor) const;
@@ -61,18 +61,21 @@ public:
   static uint32_t nentries(const ZPage* page);
   static ZForwarding* alloc(ZForwardingAllocator* allocator, ZPage* page);

   uint8_t type() const;
   uintptr_t start() const;
   size_t size() const;
   size_t object_alignment_shift() const;
-  ZPage* page() const;
-
-  bool is_pinned() const;
-  void set_pinned();
+  void object_iterate(ObjectClosure *cl);

   bool retain_page();
+  ZPage* claim_page();
   void release_page();
+  void wait_page_released() const;
+  ZPage* detach_page();
+
+  void set_in_place();
+  bool in_place() const;

-  ZForwardingEntry find(uintptr_t from_index) const;
   ZForwardingEntry find(uintptr_t from_index, ZForwardingCursor* cursor) const;
   uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, ZForwardingCursor* cursor);
src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -29,6 +29,7 @@
 #include "gc/z/zForwardingAllocator.inline.hpp"
 #include "gc/z/zHash.inline.hpp"
 #include "gc/z/zHeap.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zVirtualMemory.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -56,8 +57,13 @@ inline ZForwarding::ZForwarding(ZPage* page, size_t nentries) :
     _object_alignment_shift(page->object_alignment_shift()),
     _entries(nentries),
     _page(page),
-    _refcount(1),
-    _pinned(false) {}
+    _ref_lock(),
+    _ref_count(1),
+    _in_place(false) {}

 inline uint8_t ZForwarding::type() const {
   return _page->type();
 }

 inline uintptr_t ZForwarding::start() const {
   return _virtual.start();
@@ -71,49 +77,16 @@ inline size_t ZForwarding::object_alignment_shift() const {
   return _object_alignment_shift;
 }

-inline ZPage* ZForwarding::page() const {
-  return _page;
+inline void ZForwarding::object_iterate(ObjectClosure *cl) {
+  return _page->object_iterate(cl);
 }

-inline bool ZForwarding::is_pinned() const {
-  return Atomic::load(&_pinned);
+inline void ZForwarding::set_in_place() {
+  _in_place = true;
 }

-inline void ZForwarding::set_pinned() {
-  Atomic::store(&_pinned, true);
-}
-
-inline bool ZForwarding::inc_refcount() {
-  uint32_t refcount = Atomic::load(&_refcount);
-
-  while (refcount > 0) {
-    const uint32_t old_refcount = refcount;
-    const uint32_t new_refcount = old_refcount + 1;
-    const uint32_t prev_refcount = Atomic::cmpxchg(&_refcount, old_refcount, new_refcount);
-    if (prev_refcount == old_refcount) {
-      return true;
-    }
-
-    refcount = prev_refcount;
-  }
-
-  return false;
-}
-
-inline bool ZForwarding::dec_refcount() {
-  assert(_refcount > 0, "Invalid state");
-  return Atomic::sub(&_refcount, 1u) == 0u;
-}
-
-inline bool ZForwarding::retain_page() {
-  return inc_refcount();
-}
-
-inline void ZForwarding::release_page() {
-  if (dec_refcount()) {
-    ZHeap::heap()->free_page(_page, true /* reclaimed */);
-    _page = NULL;
-  }
+inline bool ZForwarding::in_place() const {
+  return _in_place;
 }

 inline ZForwardingEntry* ZForwarding::entries() const {
@@ -137,11 +110,6 @@ inline ZForwardingEntry ZForwarding::next(ZForwardingCursor* cursor) const {
   return at(cursor);
 }

-inline ZForwardingEntry ZForwarding::find(uintptr_t from_index) const {
-  ZForwardingCursor dummy;
-  return find(from_index, &dummy);
-}
-
 inline ZForwardingEntry ZForwarding::find(uintptr_t from_index, ZForwardingCursor* cursor) const {
   // Reading entries in the table races with the atomic CAS done for
   // insertion into the table. This is safe because each entry is at
src/hotspot/share/gc/z/zHeap.cpp
@@ -56,7 +56,7 @@ ZHeap* ZHeap::_heap = NULL;
 ZHeap::ZHeap() :
     _workers(),
     _object_allocator(),
-    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize, ZHeuristics::max_reserve()),
+    _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize),
     _page_table(),
     _forwarding_table(),
     _mark(&_workers, &_page_table),
@@ -71,7 +71,7 @@ ZHeap::ZHeap() :
   _heap = this;

   // Update statistics
-  ZStatHeap::set_at_initialize(min_capacity(), max_capacity(), max_reserve());
+  ZStatHeap::set_at_initialize(_page_allocator.stats());
 }

 bool ZHeap::is_initialized() const {
@@ -94,18 +94,6 @@ size_t ZHeap::capacity() const {
   return _page_allocator.capacity();
 }

-size_t ZHeap::max_reserve() const {
-  return _page_allocator.max_reserve();
-}
-
-size_t ZHeap::used_high() const {
-  return _page_allocator.used_high();
-}
-
-size_t ZHeap::used_low() const {
-  return _page_allocator.used_low();
-}
-
 size_t ZHeap::used() const {
   return _page_allocator.used();
 }
@@ -114,14 +102,6 @@ size_t ZHeap::unused() const {
   return _page_allocator.unused();
 }

-size_t ZHeap::allocated() const {
-  return _page_allocator.allocated();
-}
-
-size_t ZHeap::reclaimed() const {
-  return _page_allocator.reclaimed();
-}
-
 size_t ZHeap::tlab_capacity() const {
   return capacity();
 }
@@ -261,7 +241,7 @@ void ZHeap::mark_start() {
   _mark.start();

   // Update statistics
-  ZStatHeap::set_at_mark_start(soft_max_capacity(), capacity(), used());
+  ZStatHeap::set_at_mark_start(_page_allocator.stats());
 }

 void ZHeap::mark(bool initial) {
@@ -288,7 +268,7 @@ bool ZHeap::mark_end() {
   ZVerify::after_mark();

   // Update statistics
-  ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
+  ZStatHeap::set_at_mark_end(_page_allocator.stats());

   // Block resurrection of weak/phantom references
   ZResurrection::block();
@@ -409,7 +389,7 @@ void ZHeap::select_relocation_set() {

   // Update statistics
   ZStatRelocation::set_at_select_relocation_set(selector.stats());
-  ZStatHeap::set_at_select_relocation_set(selector.stats(), reclaimed());
+  ZStatHeap::set_at_select_relocation_set(selector.stats());
 }

 void ZHeap::reset_relocation_set() {
@@ -436,7 +416,7 @@ void ZHeap::relocate_start() {
   ZGlobalPhase = ZPhaseRelocate;

   // Update statistics
-  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
+  ZStatHeap::set_at_relocate_start(_page_allocator.stats());

   // Notify JVMTI
   JvmtiTagMap::set_needs_rehashing();
@@ -444,12 +424,10 @@

 void ZHeap::relocate() {
   // Relocate relocation set
-  const bool success = _relocate.relocate(&_relocation_set);
+  _relocate.relocate(&_relocation_set);

   // Update statistics
-  ZStatRelocation::set_at_relocate_end(success);
-  ZStatHeap::set_at_relocate_end(capacity(), allocated(), reclaimed(),
-                                 used(), used_high(), used_low());
+  ZStatHeap::set_at_relocate_end(_page_allocator.stats());
 }

 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
src/hotspot/share/gc/z/zHeap.hpp
@@ -81,13 +81,8 @@ public:
   size_t max_capacity() const;
   size_t soft_max_capacity() const;
   size_t capacity() const;
-  size_t max_reserve() const;
-  size_t used_high() const;
-  size_t used_low() const;
   size_t used() const;
   size_t unused() const;
-  size_t allocated() const;
-  size_t reclaimed() const;

   size_t tlab_capacity() const;
   size_t tlab_used() const;
@@ -119,8 +114,8 @@ public:
   // Object allocation
   uintptr_t alloc_tlab(size_t size);
   uintptr_t alloc_object(size_t size);
-  uintptr_t alloc_object_for_relocation(size_t size);
-  void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
+  uintptr_t alloc_object_non_blocking(size_t size);
+  void undo_alloc_object(uintptr_t addr, size_t size);
   bool is_alloc_stalled() const;
   void check_out_of_memory();
src/hotspot/share/gc/z/zHeap.inline.hpp
@@ -25,7 +25,6 @@
 #define SHARE_GC_Z_ZHEAP_INLINE_HPP

 #include "gc/z/zAddress.inline.hpp"
-#include "gc/z/zForwarding.inline.hpp"
 #include "gc/z/zForwardingTable.inline.hpp"
 #include "gc/z/zHash.inline.hpp"
 #include "gc/z/zHeap.hpp"
@@ -80,15 +79,15 @@ inline uintptr_t ZHeap::alloc_object(size_t size) {
   return addr;
 }

-inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {
-  uintptr_t addr = _object_allocator.alloc_object_for_relocation(size);
+inline uintptr_t ZHeap::alloc_object_non_blocking(size_t size) {
+  uintptr_t addr = _object_allocator.alloc_object_non_blocking(size);
   assert(ZAddress::is_good_or_null(addr), "Bad address");
   return addr;
 }

-inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
+inline void ZHeap::undo_alloc_object(uintptr_t addr, size_t size) {
   ZPage* const page = _page_table.get(addr);
-  _object_allocator.undo_alloc_object_for_relocation(page, addr, size);
+  _object_allocator.undo_alloc_object(page, addr, size);
 }

 inline uintptr_t ZHeap::relocate_object(uintptr_t addr) {
@@ -101,13 +100,7 @@ inline uintptr_t ZHeap::relocate_object(uintptr_t addr) {
   }

   // Relocate object
-  const bool retained = forwarding->retain_page();
-  const uintptr_t new_addr = _relocate.relocate_object(forwarding, addr);
-  if (retained) {
-    forwarding->release_page();
-  }
-
-  return new_addr;
+  return _relocate.relocate_object(forwarding, ZAddress::good(addr));
 }

 inline uintptr_t ZHeap::remap_object(uintptr_t addr) {
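Taken together with the new ZRelocate::relocate_object further down, the mutator-side flow replaces the old pin-the-page scheme. A schematic sketch, condensed from the two functions involved (the wrapper name here is hypothetical):

    // Mutator hits a forwarded-but-not-yet-relocated object:
    uintptr_t relocate_or_forward(ZForwarding* forwarding, uintptr_t addr) {
      if (forwarding->retain_page()) {
        const uintptr_t to_addr = relocate_object_inner(forwarding, addr);  // copy + CAS forwarding
        forwarding->release_page();
        if (to_addr != 0) {
          return to_addr;            // relocated by us, or by a racing thread
        }
        // Non-blocking allocation failed: a GC worker will finish this
        // page (possibly in-place); wait for it, then just look up.
        forwarding->wait_page_released();
      }
      return forward_object(forwarding, addr);  // page done: table lookup only
    }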
@@ -121,7 +114,7 @@ inline uintptr_t ZHeap::remap_object(uintptr_t addr) {
   }

   // Forward object
-  return _relocate.forward_object(forwarding, addr);
+  return _relocate.forward_object(forwarding, ZAddress::good(addr));
 }

 inline bool ZHeap::is_alloc_stalled() const {
src/hotspot/share/gc/z/zHeuristics.cpp
@@ -52,13 +52,10 @@ void ZHeuristics::set_medium_page_size() {
   }
 }

-size_t ZHeuristics::max_reserve() {
-  // Reserve one small page per worker plus one shared medium page. This is
-  // still just an estimate and doesn't guarantee that we can't run out of
-  // memory during relocation.
-  const uint nworkers = MAX2(ParallelGCThreads, ConcGCThreads);
-  const size_t reserve = (nworkers * ZPageSizeSmall) + ZPageSizeMedium;
-  return MIN2(MaxHeapSize, reserve);
+size_t ZHeuristics::relocation_headroom() {
+  // Calculate headroom needed to avoid in-place relocation. Each worker will try
+  // to allocate a small page, and all workers will share a single medium page.
+  return (MAX2(ParallelGCThreads, ConcGCThreads) * ZPageSizeSmall) + ZPageSizeMedium;
 }

 bool ZHeuristics::use_per_cpu_shared_small_pages() {
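For scale, a worked example of the headroom formula, with hypothetical flag values (ZPageSizeSmall = 2M, ZPageSizeMedium = 32M):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t ZPageSizeSmall = 2 * M, ZPageSizeMedium = 32 * M;
      const size_t ParallelGCThreads = 20, ConcGCThreads = 4;
      // One small page per worker, plus one shared medium page:
      const size_t headroom =
          std::max(ParallelGCThreads, ConcGCThreads) * ZPageSizeSmall + ZPageSizeMedium;
      std::printf("relocation headroom: %zuM\n", headroom / M);  // 72M
      return 0;
    }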
@@ -73,15 +70,14 @@ static uint nworkers_based_on_ncpus(double cpu_share_in_percent) {
   return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0);
 }

-static uint nworkers_based_on_heap_size(double reserve_share_in_percent) {
-  const int nworkers = (MaxHeapSize * (reserve_share_in_percent / 100.0)) / ZPageSizeSmall;
+static uint nworkers_based_on_heap_size(double heap_share_in_percent) {
+  const int nworkers = (MaxHeapSize * (heap_share_in_percent / 100.0)) / ZPageSizeSmall;
   return MAX2(nworkers, 1);
 }

 static uint nworkers(double cpu_share_in_percent) {
-  // Cap number of workers so that we don't use more than 2% of the max heap
-  // for the small page reserve. This is useful when using small heaps on
-  // large machines.
+  // Cap number of workers so that they don't use more than 2% of the max heap
+  // during relocation. This is useful when using small heaps on large machines.
   return MIN2(nworkers_based_on_ncpus(cpu_share_in_percent),
               nworkers_based_on_heap_size(2.0));
 }
src/hotspot/share/gc/z/zHeuristics.hpp
@@ -30,7 +30,7 @@ class ZHeuristics : public AllStatic {
 public:
   static void set_medium_page_size();

-  static size_t max_reserve();
+  static size_t relocation_headroom();

   static bool use_per_cpu_shared_small_pages();
src/hotspot/share/gc/z/zInitialize.cpp
@@ -50,6 +50,7 @@ ZInitialize::ZInitialize(ZBarrierSet* barrier_set) {
   ZThreadLocalAllocBuffer::initialize();
   ZTracer::initialize();
   ZLargePages::initialize();
+  ZHeuristics::set_medium_page_size();
   ZBarrierSet::set_barrier_set(barrier_set);

   pd_initialize();
src/hotspot/share/gc/z/zLiveMap.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,11 +143,14 @@ inline void ZLiveMap::iterate_segment(ObjectClosure* cl, BitMap::idx_t segment,
     // Calculate object address
     const uintptr_t addr = page_start + ((index / 2) << page_object_alignment_shift);

+    // Get the size of the object before calling the closure, which
+    // might overwrite the object in case we are relocating in-place.
+    const size_t size = ZUtils::object_size(addr);
+
     // Apply closure
     cl->do_object(ZOop::from_address(addr));

     // Find next bit after this object
-    const size_t size = ZUtils::object_size(addr);
     const uintptr_t next_addr = align_up(addr + size, 1 << page_object_alignment_shift);
     const BitMap::idx_t next_index = ((next_addr - page_start) >> page_object_alignment_shift) * 2;
     if (next_index >= end_index) {
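The reordering matters because, with in-place relocation, the closure may copy another live object over the memory of the one just visited; reading the header afterwards would then compute a garbage size. Schematically:

    // Before (broken under in-place relocation):
    cl->do_object(obj);                          // may overwrite obj's memory
    const size_t size = ZUtils::object_size(obj); // header may already be gone

    // After: read the size first, then let the closure run,
    // then advance by the previously captured size.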
src/hotspot/share/gc/z/zObjectAllocator.cpp
@@ -44,8 +44,7 @@ ZObjectAllocator::ZObjectAllocator() :
     _used(0),
     _undone(0),
     _shared_medium_page(NULL),
-    _shared_small_page(NULL),
-    _worker_small_page(NULL) {}
+    _shared_small_page(NULL) {}

 ZPage** ZObjectAllocator::shared_small_page_addr() {
   return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
@@ -122,8 +121,6 @@ uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
 }

 uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
-  assert(ZThread::is_java(), "Should be a Java thread");
-
   uintptr_t addr = 0;

   // Allocate new large page
@@ -141,43 +138,8 @@ uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
   return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
 }

-uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
-  assert(!ZThread::is_worker(), "Should not be a worker thread");
-
-  // Non-worker small page allocation can never use the reserve
-  flags.set_no_reserve();
-
-  return alloc_object_in_shared_page(shared_small_page_addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
-}
-
-uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
-  assert(ZThread::is_worker(), "Should be a worker thread");
-
-  ZPage* page = _worker_small_page.get();
-  uintptr_t addr = 0;
-
-  if (page != NULL) {
-    addr = page->alloc_object(size);
-  }
-
-  if (addr == 0) {
-    // Allocate new page
-    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
-    if (page != NULL) {
-      addr = page->alloc_object(size);
-    }
-    _worker_small_page.set(page);
-  }
-
-  return addr;
-}
-
 uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
-  if (flags.worker_thread()) {
-    return alloc_small_object_from_worker(size, flags);
-  } else {
-    return alloc_small_object_from_nonworker(size, flags);
-  }
+  return alloc_object_in_shared_page(shared_small_page_addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
 }

 uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
@@ -194,85 +156,28 @@ uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
 }

 uintptr_t ZObjectAllocator::alloc_object(size_t size) {
-  assert(ZThread::is_java(), "Must be a Java thread");
-
   ZAllocationFlags flags;
-  flags.set_no_reserve();
-
   return alloc_object(size, flags);
 }

-uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
+uintptr_t ZObjectAllocator::alloc_object_non_blocking(size_t size) {
   ZAllocationFlags flags;
-  flags.set_relocation();
   flags.set_non_blocking();

-  if (ZThread::is_worker()) {
-    flags.set_worker_thread();
-  }
-
   return alloc_object(size, flags);
 }

-bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
-  assert(page->type() == ZPageTypeLarge, "Invalid page type");
-
-  // Undo page allocation
-  undo_alloc_page(page);
-  return true;
-}
-
-bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
-  assert(page->type() == ZPageTypeMedium, "Invalid page type");
-
-  // Try atomic undo on shared page
-  return page->undo_alloc_object_atomic(addr, size);
-}
-
-bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
-  assert(page->type() == ZPageTypeSmall, "Invalid page type");
-
-  // Try atomic undo on shared page
-  return page->undo_alloc_object_atomic(addr, size);
-}
-
-bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
-  assert(page->type() == ZPageTypeSmall, "Invalid page type");
-  assert(page == _worker_small_page.get(), "Invalid page");
-
-  // Non-atomic undo on worker-local page
-  const bool success = page->undo_alloc_object(addr, size);
-  assert(success, "Should always succeed");
-  return success;
-}
-
-bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
-  if (ZThread::is_worker()) {
-    return undo_alloc_small_object_from_worker(page, addr, size);
-  } else {
-    return undo_alloc_small_object_from_nonworker(page, addr, size);
-  }
-}
-
-bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
+void ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
   const uint8_t type = page->type();

-  if (type == ZPageTypeSmall) {
-    return undo_alloc_small_object(page, addr, size);
-  } else if (type == ZPageTypeMedium) {
-    return undo_alloc_medium_object(page, addr, size);
-  } else {
-    return undo_alloc_large_object(page);
-  }
-}
-
-void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
-  if (undo_alloc_object(page, addr, size)) {
+  if (type == ZPageTypeLarge) {
+    undo_alloc_page(page);
     ZStatInc(ZCounterUndoObjectAllocationSucceeded);
   } else {
-    ZStatInc(ZCounterUndoObjectAllocationFailed);
-    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
-                  addr, size, ZThread::id(), ZThread::name());
+    if (page->undo_alloc_object_atomic(addr, size)) {
+      ZStatInc(ZCounterUndoObjectAllocationSucceeded);
+    } else {
+      ZStatInc(ZCounterUndoObjectAllocationFailed);
+    }
   }
 }
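All undo paths now funnel into the page's atomic undo (plus a plain page undo for large pages). Conceptually, undo_alloc_object_atomic is a bump-pointer retraction that only succeeds when nothing was allocated after the object; a minimal sketch of that idea, under the assumption of a simple atomic top pointer:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Retract 'top' from addr+size back to addr, but only if this object is
    // still the last allocation in the page (otherwise the undo must fail).
    static bool undo_alloc_object_atomic(std::atomic<uintptr_t>& top, uintptr_t addr, size_t size) {
      uintptr_t expected = addr + size;
      return top.compare_exchange_strong(expected, addr);
    }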
@@ -314,5 +219,4 @@ void ZObjectAllocator::retire_pages() {
   // Reset allocation pages
   _shared_medium_page.set(NULL);
   _shared_small_page.set_all(NULL);
-  _worker_small_page.set_all(NULL);
 }
src/hotspot/share/gc/z/zObjectAllocator.hpp
@@ -36,7 +36,6 @@ private:
   ZPerCPU<size_t> _undone;
   ZContended<ZPage*> _shared_medium_page;
   ZPerCPU<ZPage*> _shared_small_page;
-  ZPerWorker<ZPage*> _worker_small_page;

   ZPage** shared_small_page_addr();
   ZPage* const* shared_small_page_addr() const;
@@ -54,25 +53,15 @@ private:

   uintptr_t alloc_large_object(size_t size, ZAllocationFlags flags);
   uintptr_t alloc_medium_object(size_t size, ZAllocationFlags flags);
-  uintptr_t alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags);
-  uintptr_t alloc_small_object_from_worker(size_t size, ZAllocationFlags flags);
   uintptr_t alloc_small_object(size_t size, ZAllocationFlags flags);
   uintptr_t alloc_object(size_t size, ZAllocationFlags flags);

-  bool undo_alloc_large_object(ZPage* page);
-  bool undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size);
-  bool undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size);
-  bool undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size);
-  bool undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size);
-  bool undo_alloc_object(ZPage* page, uintptr_t addr, size_t size);
-
 public:
   ZObjectAllocator();

   uintptr_t alloc_object(size_t size);

-  uintptr_t alloc_object_for_relocation(size_t size);
-  void undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size);
+  uintptr_t alloc_object_non_blocking(size_t size);
+  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size);

   size_t used() const;
   size_t remaining() const;
src/hotspot/share/gc/z/zPage.cpp
@@ -64,6 +64,11 @@ void ZPage::reset() {
   _last_used = 0;
 }

+void ZPage::reset_for_in_place_relocation() {
+  _seqnum = ZGlobalSeqNum;
+  _top = start();
+}
+
 ZPage* ZPage::retype(uint8_t type) {
   assert(_type != type, "Invalid retype");
   _type = type;
@@ -123,3 +128,8 @@ void ZPage::print_on(outputStream* out) const {
 void ZPage::print() const {
   print_on(tty);
 }
+
+void ZPage::verify_live(uint32_t live_objects, size_t live_bytes) const {
+  guarantee(live_objects == _livemap.live_objects(), "Invalid number of live objects");
+  guarantee(live_bytes == _livemap.live_bytes(), "Invalid number of live bytes");
+}
src/hotspot/share/gc/z/zPage.hpp
@@ -82,6 +82,7 @@ public:
   void set_last_used();

   void reset();
+  void reset_for_in_place_relocation();

   ZPage* retype(uint8_t type);
   ZPage* split(size_t size);
@@ -109,6 +110,8 @@ public:

   void print_on(outputStream* out) const;
   void print() const;
+
+  void verify_live(uint32_t live_objects, size_t live_bytes) const;
 };

 class ZPageClosure {
src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -30,7 +30,7 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zPage.inline.hpp"
-#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPageAllocator.inline.hpp"
 #include "gc/z/zPageCache.hpp"
 #include "gc/z/zSafeDelete.inline.hpp"
 #include "gc/z/zStat.hpp"
@@ -130,22 +130,19 @@ public:
 ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                                size_t min_capacity,
                                size_t initial_capacity,
-                               size_t max_capacity,
-                               size_t max_reserve) :
+                               size_t max_capacity) :
     _lock(),
     _cache(),
     _virtual(max_capacity),
     _physical(max_capacity),
     _min_capacity(min_capacity),
     _max_capacity(max_capacity),
-    _max_reserve(max_reserve),
     _current_max_capacity(max_capacity),
     _capacity(0),
     _claimed(0),
     _used(0),
     _used_high(0),
     _used_low(0),
     _allocated(0),
     _reclaimed(0),
     _stalled(),
     _satisfied(),
@@ -161,7 +158,6 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers,
   log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
   log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
   log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
-  log_info_p(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
   if (ZPageSizeMedium > 0) {
     log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
   } else {
@@ -259,18 +255,6 @@ size_t ZPageAllocator::capacity() const {
   return Atomic::load(&_capacity);
 }

-size_t ZPageAllocator::max_reserve() const {
-  return _max_reserve;
-}
-
-size_t ZPageAllocator::used_high() const {
-  return _used_high;
-}
-
-size_t ZPageAllocator::used_low() const {
-  return _used_low;
-}
-
 size_t ZPageAllocator::used() const {
   return Atomic::load(&_used);
 }
@@ -279,22 +263,24 @@ size_t ZPageAllocator::unused() const {
   const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
   const ssize_t used = (ssize_t)Atomic::load(&_used);
   const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
-  const ssize_t max_reserve = (ssize_t)_max_reserve;
-  const ssize_t unused = capacity - used - claimed - max_reserve;
+  const ssize_t unused = capacity - used - claimed;
   return unused > 0 ? (size_t)unused : 0;
 }

-size_t ZPageAllocator::allocated() const {
-  return _allocated;
-}
-
-size_t ZPageAllocator::reclaimed() const {
-  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
+ZPageAllocatorStats ZPageAllocator::stats() const {
+  ZLocker<ZLock> locker(&_lock);
+  return ZPageAllocatorStats(_min_capacity,
+                             _max_capacity,
+                             soft_max_capacity(),
+                             _capacity,
+                             _used,
+                             _used_high,
+                             _used_low,
+                             _reclaimed);
 }

 void ZPageAllocator::reset_statistics() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   _allocated = 0;
   _reclaimed = 0;
   _used_high = _used_low = _used;
 }
@@ -333,13 +319,12 @@ void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
   }
 }

-void ZPageAllocator::increase_used(size_t size, bool relocation) {
-  if (relocation) {
-    // Allocating a page for the purpose of relocation has a
-    // negative contribution to the number of reclaimed bytes.
+void ZPageAllocator::increase_used(size_t size, bool worker_relocation) {
+  if (worker_relocation) {
+    // Allocating a page for the purpose of worker relocation has
+    // a negative contribution to the number of reclaimed bytes.
     _reclaimed -= size;
   }
-  _allocated += size;

   // Update atomically since we have concurrent readers
   const size_t used = Atomic::add(&_used, size);
@@ -355,8 +340,6 @@ void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
   // to undo an allocation.
   if (reclaimed) {
     _reclaimed += size;
-  } else {
-    _allocated -= size;
   }

   // Update atomically since we have concurrent readers
@@ -401,45 +384,23 @@ void ZPageAllocator::destroy_page(ZPage* page) {
   _safe_delete(page);
 }

-bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
-  size_t available = _current_max_capacity - _used - _claimed;
-
-  if (no_reserve) {
-    // The reserve should not be considered available
-    available -= MIN2(available, _max_reserve);
-  }
-
+bool ZPageAllocator::is_alloc_allowed(size_t size) const {
+  const size_t available = _current_max_capacity - _used - _claimed;
   return available >= size;
 }

-bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
-  size_t available = _capacity - _used - _claimed;
-
-  if (no_reserve) {
-    // The reserve should not be considered available
-    available -= MIN2(available, _max_reserve);
-  } else if (_capacity != _current_max_capacity) {
-    // Always increase capacity before using the reserve
-    return false;
-  }
-
-  return available >= size;
-}
-
-bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
-  if (!is_alloc_allowed(size, no_reserve)) {
+bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZList<ZPage>* pages) {
+  if (!is_alloc_allowed(size)) {
     // Out of memory
     return false;
   }

   // Try allocate from the page cache
-  if (is_alloc_allowed_from_cache(size, no_reserve)) {
-    ZPage* const page = _cache.alloc_page(type, size);
-    if (page != NULL) {
-      // Success
-      pages->insert_last(page);
-      return true;
-    }
+  ZPage* const page = _cache.alloc_page(type, size);
+  if (page != NULL) {
+    // Success
+    pages->insert_last(page);
+    return true;
   }

   // Try increase capacity
@@ -461,17 +422,13 @@ bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
   const ZAllocationFlags flags = allocation->flags();
   ZList<ZPage>* const pages = allocation->pages();

-  // Try allocate without using the reserve
-  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
-    // If allowed to, try allocate using the reserve
-    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
-      // Out of memory
-      return false;
-    }
+  if (!alloc_page_common_inner(type, size, pages)) {
+    // Out of memory
+    return false;
   }

   // Update used statistics
-  increase_used(size, flags.relocation());
+  increase_used(size, flags.worker_relocation());

   // Success
   return true;
@@ -689,9 +646,9 @@ retry:
   // where the global sequence number was updated.
   page->reset();

-  // Update allocation statistics. Exclude worker threads to avoid
-  // artificial inflation of the allocation rate due to relocation.
-  if (!flags.worker_thread()) {
+  // Update allocation statistics. Exclude worker relocations to avoid
+  // artificial inflation of the allocation rate during relocation.
+  if (!flags.worker_relocation()) {
     // Note that there are two allocation rate counters, which have
     // different purposes and are sampled at different frequencies.
     const size_t bytes = page->size();
@@ -701,7 +658,7 @@

   // Send event
   event.commit(type, size, allocation.flushed(), allocation.committed(),
-               page->physical_memory().nsegments(), flags.non_blocking(), flags.no_reserve());
+               page->physical_memory().nsegments(), flags.non_blocking());

   return page;
 }
@@ -776,11 +733,10 @@ size_t ZPageAllocator::uncommit(uint64_t* timeout) {
     SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
     ZLocker<ZLock> locker(&_lock);

-    // Never uncommit the reserve, and never uncommit below min capacity. We flush
-    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
-    // one granule and at most 256M), in case demand for memory increases while we
-    // are uncommitting.
-    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
+    // Never uncommit below min capacity. We flush out and uncommit chunks at
+    // a time (~0.8% of the max capacity, but at least one granule and at most
+    // 256M), in case demand for memory increases while we are uncommitting.
+    const size_t retain = MAX2(_used, _min_capacity);
     const size_t release = _capacity - retain;
     const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
     const size_t flush = MIN2(release, limit);
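With the reserve gone, the retained capacity is simply max(used, min capacity). A worked example with hypothetical sizes:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t used = 300 * M, min_capacity = 256 * M, capacity = 1024 * M;
      const size_t retain = std::max(used, min_capacity);          // 300M
      std::printf("releasable: %zuM\n", (capacity - retain) / M);  // 724M
      return 0;
    }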
src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -35,6 +35,7 @@

 class ThreadClosure;
 class ZPageAllocation;
+class ZPageAllocatorStats;
 class ZWorkers;
 class ZUncommitter;
 class ZUnmapper;
@@ -45,20 +46,18 @@ class ZPageAllocator {
   friend class ZUncommitter;

 private:
-  ZLock _lock;
+  mutable ZLock _lock;
   ZPageCache _cache;
   ZVirtualMemoryManager _virtual;
   ZPhysicalMemoryManager _physical;
   const size_t _min_capacity;
   const size_t _max_capacity;
-  const size_t _max_reserve;
   volatile size_t _current_max_capacity;
   volatile size_t _capacity;
   volatile size_t _claimed;
   volatile size_t _used;
   size_t _used_high;
   size_t _used_low;
   size_t _allocated;
   ssize_t _reclaimed;
   ZList<ZPageAllocation> _stalled;
   ZList<ZPageAllocation> _satisfied;
@@ -83,10 +82,9 @@ private:

   void destroy_page(ZPage* page);

-  bool is_alloc_allowed(size_t size, bool no_reserve) const;
-  bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const;
+  bool is_alloc_allowed(size_t size) const;

-  bool alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages);
+  bool alloc_page_common_inner(uint8_t type, size_t size, ZList<ZPage>* pages);
   bool alloc_page_common(ZPageAllocation* allocation);
   bool alloc_page_stall(ZPageAllocation* allocation);
   bool alloc_page_or_stall(ZPageAllocation* allocation);
@@ -104,8 +102,7 @@ public:
   ZPageAllocator(ZWorkers* workers,
                  size_t min_capacity,
                  size_t initial_capacity,
-                 size_t max_capacity,
-                 size_t max_reserve);
+                 size_t max_capacity);

   bool is_initialized() const;

@@ -113,13 +110,10 @@ public:
   size_t max_capacity() const;
   size_t soft_max_capacity() const;
   size_t capacity() const;
-  size_t max_reserve() const;
-  size_t used_high() const;
-  size_t used_low() const;
   size_t used() const;
   size_t unused() const;
-  size_t allocated() const;
-  size_t reclaimed() const;
+
+  ZPageAllocatorStats stats() const;

   void reset_statistics();
@@ -141,4 +135,36 @@ public:
   void threads_do(ThreadClosure* tc) const;
 };

+class ZPageAllocatorStats {
+private:
+  size_t _min_capacity;
+  size_t _max_capacity;
+  size_t _soft_max_capacity;
+  size_t _current_max_capacity;
+  size_t _capacity;
+  size_t _used;
+  size_t _used_high;
+  size_t _used_low;
+  size_t _reclaimed;
+
+public:
+  ZPageAllocatorStats(size_t min_capacity,
+                      size_t max_capacity,
+                      size_t soft_max_capacity,
+                      size_t capacity,
+                      size_t used,
+                      size_t used_high,
+                      size_t used_low,
+                      size_t reclaimed);
+
+  size_t min_capacity() const;
+  size_t max_capacity() const;
+  size_t soft_max_capacity() const;
+  size_t capacity() const;
+  size_t used() const;
+  size_t used_high() const;
+  size_t used_low() const;
+  size_t reclaimed() const;
+};
+
 #endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
src/hotspot/share/gc/z/zPageAllocator.inline.hpp (new file, 78 lines)
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZPAGEALLOCATOR_INLINE_HPP
+#define SHARE_GC_Z_ZPAGEALLOCATOR_INLINE_HPP
+
+#include "gc/z/zPageAllocator.hpp"
+
+inline ZPageAllocatorStats::ZPageAllocatorStats(size_t min_capacity,
+                                                size_t max_capacity,
+                                                size_t soft_max_capacity,
+                                                size_t capacity,
+                                                size_t used,
+                                                size_t used_high,
+                                                size_t used_low,
+                                                size_t reclaimed) :
+    _min_capacity(min_capacity),
+    _max_capacity(max_capacity),
+    _soft_max_capacity(soft_max_capacity),
+    _capacity(capacity),
+    _used(used),
+    _used_high(used_high),
+    _used_low(used_low),
+    _reclaimed(reclaimed) {}
+
+inline size_t ZPageAllocatorStats::min_capacity() const {
+  return _min_capacity;
+}
+
+inline size_t ZPageAllocatorStats::max_capacity() const {
+  return _max_capacity;
+}
+
+inline size_t ZPageAllocatorStats::soft_max_capacity() const {
+  return _soft_max_capacity;
+}
+
+inline size_t ZPageAllocatorStats::capacity() const {
+  return _capacity;
+}
+
+inline size_t ZPageAllocatorStats::used() const {
+  return _used;
+}
+
+inline size_t ZPageAllocatorStats::used_high() const {
+  return _used_high;
+}
+
+inline size_t ZPageAllocatorStats::used_low() const {
+  return _used_low;
+}
+
+inline size_t ZPageAllocatorStats::reclaimed() const {
+  return _reclaimed;
+}
+
+#endif // SHARE_GC_Z_ZPAGEALLOCATOR_INLINE_HPP
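A note on the design: ZPageAllocator::stats() (shown earlier) copies all counters inside one critical section, which is also why _lock became mutable. The general pattern, sketched with standard C++ types rather than ZGC's lock classes:

    #include <cstddef>
    #include <mutex>

    struct Stats {
      size_t capacity;
      size_t used;
    };

    class Allocator {
      mutable std::mutex _lock;  // mutable: stats() is const but must lock
      size_t _capacity = 0;
      size_t _used = 0;

    public:
      Stats stats() const {
        std::lock_guard<std::mutex> guard(_lock);
        return Stats{_capacity, _used};  // one consistent snapshot
      }
    };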
@ -29,158 +29,377 @@
|
||||
#include "gc/z/zPage.inline.hpp"
|
||||
#include "gc/z/zRelocate.hpp"
|
||||
#include "gc/z/zRelocationSet.inline.hpp"
|
||||
#include "gc/z/zRootsIterator.hpp"
|
||||
#include "gc/z/zStat.hpp"
|
||||
#include "gc/z/zTask.hpp"
|
||||
#include "gc/z/zThread.inline.hpp"
|
||||
#include "gc/z/zWorkers.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "prims/jvmtiTagMap.hpp"
|
||||
|
||||
static const ZStatCounter ZCounterRelocationContention("Contention", "Relocation Contention", ZStatUnitOpsPerSecond);
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
ZRelocate::ZRelocate(ZWorkers* workers) :
|
||||
_workers(workers) {}
|
||||
|
||||
uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t from_index, uintptr_t from_offset) const {
|
||||
static uintptr_t forwarding_index(ZForwarding* forwarding, uintptr_t from_addr) {
|
||||
const uintptr_t from_offset = ZAddress::offset(from_addr);
|
||||
return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
|
||||
}
|
||||
|
||||
static uintptr_t forwarding_find(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) {
|
||||
const uintptr_t from_index = forwarding_index(forwarding, from_addr);
|
||||
const ZForwardingEntry entry = forwarding->find(from_index, cursor);
|
||||
return entry.populated() ? ZAddress::good(entry.to_offset()) : 0;
|
||||
}
|
||||
|
||||
static uintptr_t forwarding_insert(ZForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, ZForwardingCursor* cursor) {
|
||||
const uintptr_t from_index = forwarding_index(forwarding, from_addr);
|
||||
const uintptr_t to_offset = ZAddress::offset(to_addr);
|
||||
const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor);
|
||||
return ZAddress::good(to_offset_final);
|
||||
}

uintptr_t ZRelocate::relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr) const {
  ZForwardingCursor cursor;

  // Lookup forwarding entry
  const ZForwardingEntry entry = forwarding->find(from_index, &cursor);
  if (entry.populated() && entry.from_index() == from_index) {
    // Already relocated, return new address
    return entry.to_offset();
  // Lookup forwarding
  uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
  if (to_addr != 0) {
    // Already relocated
    return to_addr;
  }

  assert(ZHeap::heap()->is_object_live(ZAddress::good(from_offset)), "Should be live");

  if (forwarding->is_pinned()) {
    // In-place forward
    return forwarding->insert(from_index, from_offset, &cursor);
  }
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const uintptr_t from_good = ZAddress::good(from_offset);
  const size_t size = ZUtils::object_size(from_good);
  const uintptr_t to_good = ZHeap::heap()->alloc_object_for_relocation(size);
  if (to_good == 0) {
    // Failed, in-place forward
    return forwarding->insert(from_index, from_offset, &cursor);
  const size_t size = ZUtils::object_size(from_addr);
  to_addr = ZHeap::heap()->alloc_object_non_blocking(size);
  if (to_addr == 0) {
    // Allocation failed
    return 0;
  }

  // Copy object
  ZUtils::object_copy(from_good, to_good, size);
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding entry
  const uintptr_t to_offset = ZAddress::offset(to_good);
  const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, &cursor);
  if (to_offset_final == to_offset) {
    // Relocation succeeded
    return to_offset;
  // Insert forwarding
  const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, &cursor);
  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    ZHeap::heap()->undo_alloc_object(to_addr, size);
  }

  // Relocation contention
  ZStatInc(ZCounterRelocationContention);
  log_trace(gc)("Relocation contention, thread: " PTR_FORMAT " (%s), forwarding: " PTR_FORMAT
                ", entry: " SIZE_FORMAT ", oop: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(forwarding), cursor, from_good, size);

  // Try undo allocation
  ZHeap::heap()->undo_alloc_object_for_relocation(to_good, size);

  return to_offset_final;
  return to_addr_final;
}

uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const {
  const uintptr_t from_offset = ZAddress::offset(from_addr);
  const uintptr_t from_index = (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
  const uintptr_t to_offset = relocate_object_inner(forwarding, from_index, from_offset);
  // Relocate object
  if (forwarding->retain_page()) {
    const uintptr_t to_addr = relocate_object_inner(forwarding, from_addr);
    forwarding->release_page();

    if (from_offset == to_offset) {
      // In-place forwarding, pin page
      forwarding->set_pinned();
    if (to_addr != 0) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Wait for a worker thread to
    // complete relocation of this page, and then forward object.
    forwarding->wait_page_released();
  }

  return ZAddress::good(to_offset);
  // Forward object
  return forward_object(forwarding, from_addr);
}
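
The new relocate_object above replaces the old pin-based failure path with a retain/relocate/wait protocol: a mutator may only help relocate while it holds the page, and on allocation failure it waits for the worker relocating the page in-place. A condensed standalone sketch of that mutator-side control flow, with stubbed stand-in types (Forwarding and the helpers here are illustrative, not the real ZGC classes):

#include <cstdint>

// Stand-in for ZForwarding; only the calls used by the protocol are modeled,
// and they are stubbed for illustration.
struct Forwarding {
  bool retain_page()        { return true; } // page still being relocated?
  void release_page()       {}
  void wait_page_released() {}               // block until the worker finishes
};

static uintptr_t relocate_inner(Forwarding*, uintptr_t) { return 0; }      // may fail
static uintptr_t forward(Forwarding*, uintptr_t from)   { return from + 0x40; }

// Help relocate while the page is retained; on failure, wait for the
// in-place worker and then just read the completed forwarding table.
uintptr_t remap(Forwarding* f, uintptr_t from_addr) {
  if (f->retain_page()) {
    const uintptr_t to_addr = relocate_inner(f, from_addr);
    f->release_page();
    if (to_addr != 0) {
      return to_addr;
    }
    f->wait_page_released();
  }
  return forward(f, from_addr);
}

int main() { Forwarding f; return remap(&f, 0x1000) == 0 ? 1 : 0; }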

uintptr_t ZRelocate::forward_object(ZForwarding* forwarding, uintptr_t from_addr) const {
  const uintptr_t from_offset = ZAddress::offset(from_addr);
  const uintptr_t from_index = (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
  const ZForwardingEntry entry = forwarding->find(from_index);

  assert(entry.populated(), "Should be forwarded");
  assert(entry.from_index() == from_index, "Should be forwarded");

  return ZAddress::good(entry.to_offset());
  ZForwardingCursor cursor;
  const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
  assert(to_addr != 0, "Should be forwarded");
  return to_addr;
}

class ZRelocateObjectClosure : public ObjectClosure {
static ZPage* alloc_page(const ZForwarding* forwarding) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return NULL;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_worker_relocation();
  return ZHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags);
}

static void free_page(ZPage* page) {
  ZHeap::heap()->free_page(page, true /* reclaimed */);
}

static bool should_free_target_page(ZPage* page) {
  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  return page != NULL && page->top() == page->start();
}

class ZRelocateSmallAllocator {
private:
  ZRelocate* const _relocate;
  ZForwarding* const _forwarding;
  volatile size_t _in_place_count;

public:
  ZRelocateObjectClosure(ZRelocate* relocate, ZForwarding* forwarding) :
      _relocate(relocate),
      _forwarding(forwarding) {}
  ZRelocateSmallAllocator() :
      _in_place_count(0) {}

  virtual void do_object(oop o) {
    _relocate->relocate_object(_forwarding, ZOop::to_address(o));
  ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) {
    ZPage* const page = alloc_page(forwarding);
    if (page == NULL) {
      Atomic::inc(&_in_place_count);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (should_free_target_page(page)) {
      free_page(page);
    }
  }

  void free_relocated_page(ZPage* page) {
    free_page(page);
  }

  uintptr_t alloc_object(ZPage* page, size_t size) const {
    return (page != NULL) ? page->alloc_object(size) : 0;
  }

  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  const size_t in_place_count() const {
    return _in_place_count;
  }
};

bool ZRelocate::work(ZRelocationSetParallelIterator* iter) {
  bool success = true;
class ZRelocateMediumAllocator {
private:
  ZConditionLock _lock;
  ZPage* _shared;
  bool _in_place;
  volatile size_t _in_place_count;

  // Relocate pages in the relocation set
  for (ZForwarding* forwarding; iter->next(&forwarding);) {
    // Relocate objects in page
    ZRelocateObjectClosure cl(this, forwarding);
    forwarding->page()->object_iterate(&cl);
public:
  ZRelocateMediumAllocator() :
      _lock(),
      _shared(NULL),
      _in_place(false),
      _in_place_count(0) {}

    if (ZVerifyForwarding) {
      forwarding->verify();
    }

    if (forwarding->is_pinned()) {
      // Relocation failed, page is now pinned
      success = false;
    } else {
      // Relocation succeeded, release page
      forwarding->release_page();
  ~ZRelocateMediumAllocator() {
    if (should_free_target_page(_shared)) {
      free_page(_shared);
    }
  }

    return success;
  }
  ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    if (_shared == target) {
      _shared = alloc_page(forwarding);
      if (_shared == NULL) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }
    }

    return _shared;
  }

  void share_target_page(ZPage* page) {
    ZLocker<ZConditionLock> locker(&_lock);

    assert(_in_place, "Invalid state");
    assert(_shared == NULL, "Invalid state");
    assert(page != NULL, "Invalid page");

    _shared = page;
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  void free_relocated_page(ZPage* page) {
    free_page(page);
  }

  uintptr_t alloc_object(ZPage* page, size_t size) const {
    return (page != NULL) ? page->alloc_object_atomic(size) : 0;
  }

  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  const size_t in_place_count() const {
    return _in_place_count;
  }
};
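
The two allocators are alternative policies for the template closure below: ZRelocateSmallAllocator gives each worker a private target page with plain bump-pointer allocation, while ZRelocateMediumAllocator shares one target page across workers, so it needs atomic allocation and a lock around target-page handoff. A sketch of the implicit interface both satisfy (illustrative only; there is no such base class in the commit, the closure is simply instantiated with either type):

#include <cstddef>
#include <cstdint>

class ZPage;
class ZForwarding;

class RelocationAllocatorConcept {
public:
  // Return a target page, or NULL to trigger in-place relocation.
  virtual ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) = 0;
  // Accept an in-place relocated page as the new shared target
  // (a no-op for the small allocator, which never shares).
  virtual void share_target_page(ZPage* page) = 0;
  // Called from the closure destructor for the last target page.
  virtual void free_target_page(ZPage* page) = 0;
  // Called when a fully evacuated page has been detached.
  virtual void free_relocated_page(ZPage* page) = 0;
  // Bump-pointer allocation (small) or atomic allocation (medium).
  virtual uintptr_t alloc_object(ZPage* page, size_t size) = 0;
  virtual void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) = 0;
  virtual size_t in_place_count() const = 0;
};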

template <typename Allocator>
class ZRelocateClosure : public ObjectClosure {
private:
  Allocator* const _allocator;
  ZForwarding* _forwarding;
  ZPage* _target;

  bool relocate_object(uintptr_t from_addr) const {
    ZForwardingCursor cursor;

    // Lookup forwarding
    if (forwarding_find(_forwarding, from_addr, &cursor) != 0) {
      // Already relocated
      return true;
    }

    // Allocate object
    const size_t size = ZUtils::object_size(from_addr);
    const uintptr_t to_addr = _allocator->alloc_object(_target, size);
    if (to_addr == 0) {
      // Allocation failed
      return false;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place() && to_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, to_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, to_addr, size);
    }

    // Insert forwarding
    if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(_target, to_addr, size);
    }

    return true;
  }

  virtual void do_object(oop obj) {
    const uintptr_t addr = ZOop::to_address(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      _target = _allocator->alloc_target_page(_forwarding, _target);
      if (_target != NULL) {
        continue;
      }

      // Claim the page being relocated to block other threads from accessing
      // it, or its forwarding table, until it has been released (relocation
      // completed).
      _target = _forwarding->claim_page();
      _target->reset_for_in_place_relocation();
      _forwarding->set_in_place();
    }
  }

public:
  ZRelocateClosure(Allocator* allocator) :
      _allocator(allocator),
      _forwarding(NULL),
      _target(NULL) {}

  ~ZRelocateClosure() {
    _allocator->free_target_page(_target);
  }

  void do_forwarding(ZForwarding* forwarding) {
    // Relocate objects
    _forwarding = forwarding;
    _forwarding->object_iterate(this);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    // Release relocated page
    _forwarding->release_page();

    if (_forwarding->in_place()) {
      // The relocated page has been relocated in-place and should not
      // be freed. Keep it as target page until it is full, and offer to
      // share it with other worker threads.
      _allocator->share_target_page(_target);
    } else {
      // Detach and free relocated page
      ZPage* const page = _forwarding->detach_page();
      _allocator->free_relocated_page(page);
    }
  }
};
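
In-place relocation compacts a page onto itself, so a copy's destination can overlap its source. The guard in relocate_object above (`_forwarding->in_place() && to_addr + size > from_addr`) selects conjoint (memmove-style) copying exactly when the ranges can overlap. A small standalone illustration, with made-up offsets and assuming that in-place compaction always moves objects towards the start of the page (to <= from):

#include <cassert>
#include <cstdint>

// True when [to, to + size) overlaps [from, from + size), given to <= from.
static bool needs_conjoint_copy(uintptr_t from, uintptr_t to, uintptr_t size) {
  return to + size > from;
}

int main() {
  // A 64-byte object moving 32 bytes down overlaps itself: memmove semantics.
  assert(needs_conjoint_copy(0x1020, 0x1000, 64));
  // The same object moving 4K down does not: memcpy semantics is fine.
  assert(!needs_conjoint_copy(0x2000, 0x1000, 64));
  return 0;
}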

class ZRelocateTask : public ZTask {
private:
  ZRelocate* const _relocate;
  ZRelocationSetParallelIterator _iter;
  bool _failed;
  ZRelocateSmallAllocator _small_allocator;
  ZRelocateMediumAllocator _medium_allocator;

public:
  ZRelocateTask(ZRelocate* relocate, ZRelocationSet* relocation_set) :
      ZTask("ZRelocateTask"),
      _relocate(relocate),
      _iter(relocation_set),
      _failed(false) {}

  virtual void work() {
    if (!_relocate->work(&_iter)) {
      _failed = true;
    }
  static bool is_small(ZForwarding* forwarding) {
    return forwarding->type() == ZPageTypeSmall;
  }

  bool failed() const {
    return _failed;
public:
  ZRelocateTask(ZRelocationSet* relocation_set) :
      ZTask("ZRelocateTask"),
      _iter(relocation_set),
      _small_allocator(),
      _medium_allocator() {}

  ~ZRelocateTask() {
    ZStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(),
                                         _medium_allocator.in_place_count());
  }

  virtual void work() {
    ZRelocateClosure<ZRelocateSmallAllocator> small(&_small_allocator);
    ZRelocateClosure<ZRelocateMediumAllocator> medium(&_medium_allocator);

    for (ZForwarding* forwarding; _iter.next(&forwarding);) {
      if (is_small(forwarding)) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }
    }
  }
};

bool ZRelocate::relocate(ZRelocationSet* relocation_set) {
  ZRelocateTask task(this, relocation_set);
void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  ZRelocateTask task(relocation_set);
  _workers->run_concurrent(&task);
  return !task.failed();
}

@ -35,9 +35,8 @@ class ZRelocate {
private:
  ZWorkers* const _workers;

  ZForwarding* forwarding_for_page(ZPage* page) const;
  uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_index, uintptr_t from_offset) const;
  bool work(ZRelocationSetParallelIterator* iter);
  uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr) const;
  void work(ZRelocationSetParallelIterator* iter);

public:
  ZRelocate(ZWorkers* workers);
@ -46,7 +45,7 @@ public:
  uintptr_t forward_object(ZForwarding* forwarding, uintptr_t from_addr) const;

  void start();
  bool relocate(ZRelocationSet* relocation_set);
  void relocate(ZRelocationSet* relocation_set);
};

#endif // SHARE_GC_Z_ZRELOCATE_HPP

@ -25,7 +25,7 @@
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zForwardingAllocator.inline.hpp"
#include "gc/z/zRelocationSet.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
@ -125,5 +125,11 @@ void ZRelocationSet::install(const ZRelocationSetSelector* selector) {
}

void ZRelocationSet::reset() {
  // Destroy forwardings
  ZRelocationSetIterator iter(this);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    forwarding->~ZForwarding();
  }

  _nforwardings = 0;
}
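
reset() destroys the forwardings with explicit destructor calls rather than delete, which suggests (as the zForwardingAllocator include above does) that they are placement-constructed into a bump-allocated arena whose backing memory is released separately. A generic standalone sketch of that pattern, with a hypothetical Foo type:

#include <new>

struct Foo {
  int _x;
  Foo(int x) : _x(x) {}
  ~Foo() {}
};

int main() {
  alignas(Foo) char arena[sizeof(Foo)];
  Foo* const foo = ::new (arena) Foo(42); // placement-construct into the arena
  foo->~Foo();                            // explicit destructor call; the arena
                                          // itself is reclaimed elsewhere
  return 0;
}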

@ -36,10 +36,8 @@ ZRelocationSetSelectorGroupStats::ZRelocationSetSelectorGroupStats() :
    _npages(0),
    _total(0),
    _live(0),
    _garbage(0),
    _empty(0),
    _compacting_from(0),
    _compacting_to(0) {}
    _relocate(0) {}

ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name,
                                                         uint8_t page_type,
@ -112,6 +110,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
  const int npages = _live_pages.length();
  int selected_from = 0;
  int selected_to = 0;
  size_t selected_live_bytes = 0;
  size_t selected_forwarding_entries = 0;
  size_t from_live_bytes = 0;
  size_t from_forwarding_entries = 0;
@ -140,6 +139,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
    if (diff_reclaimable > ZFragmentationLimit) {
      selected_from = from;
      selected_to = to;
      selected_live_bytes = from_live_bytes;
      selected_forwarding_entries = from_forwarding_entries;
    }

@ -154,8 +154,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
  _forwarding_entries = selected_forwarding_entries;

  // Update statistics
  _stats._compacting_from = selected_from * _page_size;
  _stats._compacting_to = selected_to * _page_size;
  _stats._relocate = selected_live_bytes;

  log_trace(gc, reloc)("Relocation Set (%s Pages): %d->%d, %d skipped, " SIZE_FORMAT " forwarding entries",
                       _name, selected_from, selected_to, npages - selected_from, selected_forwarding_entries);
@ -173,8 +172,7 @@ void ZRelocationSetSelectorGroup::select() {
  }

  // Send event
  event.commit(_page_type, _stats.npages(), _stats.total(), _stats.empty(),
               _stats.compacting_from(), _stats.compacting_to());
  event.commit(_page_type, _stats.npages(), _stats.total(), _stats.empty(), _stats.relocate());
}

ZRelocationSetSelector::ZRelocationSetSelector() :
@ -198,7 +196,7 @@ void ZRelocationSetSelector::select() {
  _small.select();

  // Send event
  event.commit(total(), empty(), compacting_from(), compacting_to());
  event.commit(total(), empty(), relocate());
}

ZRelocationSetSelectorStats ZRelocationSetSelector::stats() const {

@ -36,10 +36,8 @@ private:
  size_t _npages;
  size_t _total;
  size_t _live;
  size_t _garbage;
  size_t _empty;
  size_t _compacting_from;
  size_t _compacting_to;
  size_t _relocate;

public:
  ZRelocationSetSelectorGroupStats();
@ -47,10 +45,8 @@ public:
  size_t npages() const;
  size_t total() const;
  size_t live() const;
  size_t garbage() const;
  size_t empty() const;
  size_t compacting_from() const;
  size_t compacting_to() const;
  size_t relocate() const;
};

class ZRelocationSetSelectorStats {
@ -108,8 +104,7 @@ private:

  size_t total() const;
  size_t empty() const;
  size_t compacting_from() const;
  size_t compacting_to() const;
  size_t relocate() const;

public:
  ZRelocationSetSelector();

@ -40,20 +40,12 @@ inline size_t ZRelocationSetSelectorGroupStats::live() const {
  return _live;
}

inline size_t ZRelocationSetSelectorGroupStats::garbage() const {
  return _garbage;
}

inline size_t ZRelocationSetSelectorGroupStats::empty() const {
  return _empty;
}

inline size_t ZRelocationSetSelectorGroupStats::compacting_from() const {
  return _compacting_from;
}

inline size_t ZRelocationSetSelectorGroupStats::compacting_to() const {
  return _compacting_to;
inline size_t ZRelocationSetSelectorGroupStats::relocate() const {
  return _relocate;
}

inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::small() const {
@ -81,7 +73,6 @@ inline void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
  _stats._npages++;
  _stats._total += size;
  _stats._live += live;
  _stats._garbage += garbage;
}

inline void ZRelocationSetSelectorGroup::register_empty_page(ZPage* page) {
@ -89,7 +80,6 @@ inline void ZRelocationSetSelectorGroup::register_empty_page(ZPage* page) {

  _stats._npages++;
  _stats._total += size;
  _stats._garbage += size;
  _stats._empty += size;
}

@ -151,12 +141,8 @@ inline size_t ZRelocationSetSelector::empty() const {
  return _small.stats().empty() + _medium.stats().empty() + _large.stats().empty();
}

inline size_t ZRelocationSetSelector::compacting_from() const {
  return _small.stats().compacting_from() + _medium.stats().compacting_from() + _large.stats().compacting_from();
}

inline size_t ZRelocationSetSelector::compacting_to() const {
  return _small.stats().compacting_to() + _medium.stats().compacting_to() + _large.stats().compacting_to();
inline size_t ZRelocationSetSelector::relocate() const {
  return _small.stats().relocate() + _medium.stats().relocate() + _large.stats().relocate();
}

inline const ZArray<ZPage*>* ZRelocationSetSelector::small() const {

@ -26,6 +26,7 @@
#include "gc/z/zCPU.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zPageAllocator.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
@ -1139,43 +1140,45 @@ void ZStatMark::print() {
//
// Stat relocation
//
ZRelocationSetSelectorStats ZStatRelocation::_stats;
ZRelocationSetSelectorStats ZStatRelocation::_selector_stats;
size_t ZStatRelocation::_forwarding_usage;
bool ZStatRelocation::_success;
size_t ZStatRelocation::_small_in_place_count;
size_t ZStatRelocation::_medium_in_place_count;

void ZStatRelocation::set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats) {
  _stats = stats;
void ZStatRelocation::set_at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats) {
  _selector_stats = selector_stats;
}

void ZStatRelocation::set_at_install_relocation_set(size_t forwarding_usage) {
  _forwarding_usage = forwarding_usage;
}

void ZStatRelocation::set_at_relocate_end(bool success) {
  _success = success;
void ZStatRelocation::set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count) {
  _small_in_place_count = small_in_place_count;
  _medium_in_place_count = medium_in_place_count;
}

void ZStatRelocation::print(const char* name, const ZRelocationSetSelectorGroupStats& group) {
  const size_t total = _stats.small().total() + _stats.medium().total() + _stats.large().total();

  log_info(gc, reloc)("%s Pages: " SIZE_FORMAT " / " ZSIZE_FMT ", Empty: " ZSIZE_FMT ", Compacting: " ZSIZE_FMT "->" ZSIZE_FMT,
void ZStatRelocation::print(const char* name,
                            const ZRelocationSetSelectorGroupStats& selector_group,
                            size_t in_place_count) {
  log_info(gc, reloc)("%s Pages: " SIZE_FORMAT " / " SIZE_FORMAT "M, Empty: " SIZE_FORMAT "M, "
                      "Relocated: " SIZE_FORMAT "M, In-Place: " SIZE_FORMAT,
                      name,
                      group.npages(),
                      ZSIZE_ARGS_WITH_MAX(group.total(), total),
                      ZSIZE_ARGS_WITH_MAX(group.empty(), total),
                      ZSIZE_ARGS_WITH_MAX(group.compacting_from(), total),
                      ZSIZE_ARGS_WITH_MAX(group.compacting_to(), total));
                      selector_group.npages(),
                      selector_group.total() / M,
                      selector_group.empty() / M,
                      selector_group.relocate() / M,
                      in_place_count);
}

void ZStatRelocation::print() {
  print("Small", _stats.small());
  print("Small", _selector_stats.small(), _small_in_place_count);
  if (ZPageSizeMedium != 0) {
    print("Medium", _stats.medium());
    print("Medium", _selector_stats.medium(), _medium_in_place_count);
  }
  print("Large", _stats.large());
  print("Large", _selector_stats.large(), 0 /* in_place_count */);

  log_info(gc, reloc)("Forwarding Usage: " SIZE_FORMAT "M", _forwarding_usage / M);
  log_info(gc, reloc)("Relocation: %s", _success ? "Successful" : "Incomplete");
}

//
@ -1270,88 +1273,70 @@ size_t ZStatHeap::capacity_low() {
                 _at_relocate_end.capacity);
}

size_t ZStatHeap::available(size_t used) {
size_t ZStatHeap::free(size_t used) {
  return _at_initialize.max_capacity - used;
}

size_t ZStatHeap::reserve(size_t used) {
  return MIN2(_at_initialize.max_reserve, available(used));
size_t ZStatHeap::allocated(size_t used, size_t reclaimed) {
  // The amount of allocated memory between point A and B is used(B) - used(A).
  // However, we might also have reclaimed memory between point A and B. This
  // means the current amount of used memory must be incremented by the amount
  // reclaimed, so that used(B) represents the amount of used memory we would
  // have had if we had not reclaimed anything.
  return (used + reclaimed) - _at_mark_start.used;
}

size_t ZStatHeap::free(size_t used) {
  return available(used) - reserve(used);
size_t ZStatHeap::garbage(size_t reclaimed) {
  return _at_mark_end.garbage - reclaimed;
}
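
A worked example of the accounting above, with illustrative numbers: if used was 600M at mark start, is 500M now, and 300M has been reclaimed since, then allocation during that window is (500M + 300M) - 600M = 200M; and if garbage at mark end was 400M, the 300M reclaimed leaves 100M of garbage:

#include <cassert>
#include <cstddef>

int main() {
  const size_t M = 1024 * 1024;
  const size_t used_at_mark_start  = 600 * M;
  const size_t used_now            = 500 * M;
  const size_t reclaimed           = 300 * M;
  const size_t garbage_at_mark_end = 400 * M;

  // allocated(): add back what was reclaimed so the delta counts allocation only.
  const size_t allocated = (used_now + reclaimed) - used_at_mark_start;
  assert(allocated == 200 * M);

  // garbage(): what was garbage at mark end, minus what has since been reclaimed.
  const size_t garbage = garbage_at_mark_end - reclaimed;
  assert(garbage == 100 * M);
  return 0;
}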

void ZStatHeap::set_at_initialize(size_t min_capacity,
                                  size_t max_capacity,
                                  size_t max_reserve) {
  _at_initialize.min_capacity = min_capacity;
  _at_initialize.max_capacity = max_capacity;
  _at_initialize.max_reserve = max_reserve;
void ZStatHeap::set_at_initialize(const ZPageAllocatorStats& stats) {
  _at_initialize.min_capacity = stats.min_capacity();
  _at_initialize.max_capacity = stats.max_capacity();
}

void ZStatHeap::set_at_mark_start(size_t soft_max_capacity,
                                  size_t capacity,
                                  size_t used) {
  _at_mark_start.soft_max_capacity = soft_max_capacity;
  _at_mark_start.capacity = capacity;
  _at_mark_start.reserve = reserve(used);
  _at_mark_start.used = used;
  _at_mark_start.free = free(used);
void ZStatHeap::set_at_mark_start(const ZPageAllocatorStats& stats) {
  _at_mark_start.soft_max_capacity = stats.soft_max_capacity();
  _at_mark_start.capacity = stats.capacity();
  _at_mark_start.free = free(stats.used());
  _at_mark_start.used = stats.used();
}

void ZStatHeap::set_at_mark_end(size_t capacity,
                                size_t allocated,
                                size_t used) {
  _at_mark_end.capacity = capacity;
  _at_mark_end.reserve = reserve(used);
  _at_mark_end.allocated = allocated;
  _at_mark_end.used = used;
  _at_mark_end.free = free(used);
void ZStatHeap::set_at_mark_end(const ZPageAllocatorStats& stats) {
  _at_mark_end.capacity = stats.capacity();
  _at_mark_end.free = free(stats.used());
  _at_mark_end.used = stats.used();
  _at_mark_end.allocated = allocated(stats.used(), 0 /* reclaimed */);
}

void ZStatHeap::set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats, size_t reclaimed) {
void ZStatHeap::set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats) {
  const size_t live = stats.small().live() + stats.medium().live() + stats.large().live();
  const size_t garbage = stats.small().garbage() + stats.medium().garbage() + stats.large().garbage();

  _at_mark_end.live = live;
  _at_mark_end.garbage = garbage;

  _at_relocate_start.garbage = garbage - reclaimed;
  _at_relocate_start.reclaimed = reclaimed;
  _at_mark_end.garbage = _at_mark_start.used - live;
}

void ZStatHeap::set_at_relocate_start(size_t capacity,
                                      size_t allocated,
                                      size_t used) {
  _at_relocate_start.capacity = capacity;
  _at_relocate_start.reserve = reserve(used);
  _at_relocate_start.allocated = allocated;
  _at_relocate_start.used = used;
  _at_relocate_start.free = free(used);
void ZStatHeap::set_at_relocate_start(const ZPageAllocatorStats& stats) {
  _at_relocate_start.capacity = stats.capacity();
  _at_relocate_start.free = free(stats.used());
  _at_relocate_start.used = stats.used();
  _at_relocate_start.allocated = allocated(stats.used(), stats.reclaimed());
  _at_relocate_start.garbage = garbage(stats.reclaimed());
  _at_relocate_start.reclaimed = stats.reclaimed();
}

void ZStatHeap::set_at_relocate_end(size_t capacity,
                                    size_t allocated,
                                    size_t reclaimed,
                                    size_t used,
                                    size_t used_high,
                                    size_t used_low) {
  _at_relocate_end.capacity = capacity;
void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats) {
  _at_relocate_end.capacity = stats.capacity();
  _at_relocate_end.capacity_high = capacity_high();
  _at_relocate_end.capacity_low = capacity_low();
  _at_relocate_end.reserve = reserve(used);
  _at_relocate_end.reserve_high = reserve(used_low);
  _at_relocate_end.reserve_low = reserve(used_high);
  _at_relocate_end.garbage = _at_mark_end.garbage - reclaimed;
  _at_relocate_end.allocated = allocated;
  _at_relocate_end.reclaimed = reclaimed;
  _at_relocate_end.used = used;
  _at_relocate_end.used_high = used_high;
  _at_relocate_end.used_low = used_low;
  _at_relocate_end.free = free(used);
  _at_relocate_end.free_high = free(used_low);
  _at_relocate_end.free_low = free(used_high);
  _at_relocate_end.free = free(stats.used());
  _at_relocate_end.free_high = free(stats.used_low());
  _at_relocate_end.free_low = free(stats.used_high());
  _at_relocate_end.used = stats.used();
  _at_relocate_end.used_high = stats.used_high();
  _at_relocate_end.used_low = stats.used_low();
  _at_relocate_end.allocated = allocated(stats.used(), stats.reclaimed());
  _at_relocate_end.garbage = garbage(stats.reclaimed());
  _at_relocate_end.reclaimed = stats.reclaimed();
}

size_t ZStatHeap::max_capacity() {
@ -1393,15 +1378,6 @@ void ZStatHeap::print() {
             .left(ZTABLE_ARGS(_at_relocate_end.capacity_high))
             .left(ZTABLE_ARGS(_at_relocate_end.capacity_low))
             .end());
  log_info(gc, heap)("%s", table()
             .right("Reserve:")
             .left(ZTABLE_ARGS(_at_mark_start.reserve))
             .left(ZTABLE_ARGS(_at_mark_end.reserve))
             .left(ZTABLE_ARGS(_at_relocate_start.reserve))
             .left(ZTABLE_ARGS(_at_relocate_end.reserve))
             .left(ZTABLE_ARGS(_at_relocate_end.reserve_high))
             .left(ZTABLE_ARGS(_at_relocate_end.reserve_low))
             .end());
  log_info(gc, heap)("%s", table()
             .right("Free:")
             .left(ZTABLE_ARGS(_at_mark_start.free))

@ -35,6 +35,7 @@
#include "utilities/ticks.hpp"

class ZPage;
class ZPageAllocatorStats;
class ZRelocationSetSelectorGroupStats;
class ZRelocationSetSelectorStats;
class ZStatSampler;
@ -422,16 +423,19 @@ public:
//
class ZStatRelocation : public AllStatic {
private:
  static ZRelocationSetSelectorStats _stats;
  static ZRelocationSetSelectorStats _selector_stats;
  static size_t _forwarding_usage;
  static bool _success;
  static size_t _small_in_place_count;
  static size_t _medium_in_place_count;

  static void print(const char* name, const ZRelocationSetSelectorGroupStats& group);
  static void print(const char* name,
                    const ZRelocationSetSelectorGroupStats& selector_group,
                    size_t in_place_count);

public:
  static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats);
  static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& selector_stats);
  static void set_at_install_relocation_set(size_t forwarding_usage);
  static void set_at_relocate_end(bool success);
  static void set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count);

  static void print();
};
@ -483,82 +487,61 @@ private:
  static struct ZAtInitialize {
    size_t min_capacity;
    size_t max_capacity;
    size_t max_reserve;
  } _at_initialize;

  static struct ZAtMarkStart {
    size_t soft_max_capacity;
    size_t capacity;
    size_t reserve;
    size_t used;
    size_t free;
    size_t used;
  } _at_mark_start;

  static struct ZAtMarkEnd {
    size_t capacity;
    size_t reserve;
    size_t allocated;
    size_t used;
    size_t free;
    size_t used;
    size_t live;
    size_t allocated;
    size_t garbage;
  } _at_mark_end;

  static struct ZAtRelocateStart {
    size_t capacity;
    size_t reserve;
    size_t garbage;
    size_t allocated;
    size_t reclaimed;
    size_t used;
    size_t free;
    size_t used;
    size_t allocated;
    size_t garbage;
    size_t reclaimed;
  } _at_relocate_start;

  static struct ZAtRelocateEnd {
    size_t capacity;
    size_t capacity_high;
    size_t capacity_low;
    size_t reserve;
    size_t reserve_high;
    size_t reserve_low;
    size_t garbage;
    size_t allocated;
    size_t reclaimed;
    size_t used;
    size_t used_high;
    size_t used_low;
    size_t free;
    size_t free_high;
    size_t free_low;
    size_t used;
    size_t used_high;
    size_t used_low;
    size_t allocated;
    size_t garbage;
    size_t reclaimed;
  } _at_relocate_end;

  static size_t capacity_high();
  static size_t capacity_low();
  static size_t available(size_t used);
  static size_t reserve(size_t used);
  static size_t free(size_t used);
  static size_t allocated(size_t used, size_t reclaimed);
  static size_t garbage(size_t reclaimed);

public:
  static void set_at_initialize(size_t min_capacity,
                                size_t max_capacity,
                                size_t max_reserve);
  static void set_at_mark_start(size_t soft_max_capacity,
                                size_t capacity,
                                size_t used);
  static void set_at_mark_end(size_t capacity,
                              size_t allocated,
                              size_t used);
  static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats,
                                           size_t reclaimed);
  static void set_at_relocate_start(size_t capacity,
                                    size_t allocated,
                                    size_t used);
  static void set_at_relocate_end(size_t capacity,
                                  size_t allocated,
                                  size_t reclaimed,
                                  size_t used,
                                  size_t used_high,
                                  size_t used_low);
  static void set_at_initialize(const ZPageAllocatorStats& stats);
  static void set_at_mark_start(const ZPageAllocatorStats& stats);
  static void set_at_mark_end(const ZPageAllocatorStats& stats);
  static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats);
  static void set_at_relocate_start(const ZPageAllocatorStats& stats);
  static void set_at_relocate_end(const ZPageAllocatorStats& stats);

  static size_t max_capacity();
  static size_t used_at_mark_start();

@ -37,7 +37,8 @@ public:

  // Object
  static size_t object_size(uintptr_t addr);
  static void object_copy(uintptr_t from, uintptr_t to, size_t size);
  static void object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size);
  static void object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size);
};

#endif // SHARE_GC_Z_ZUTILS_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -45,8 +45,14 @@ inline size_t ZUtils::object_size(uintptr_t addr) {
  return words_to_bytes(ZOop::from_address(addr)->size());
}

inline void ZUtils::object_copy(uintptr_t from, uintptr_t to, size_t size) {
inline void ZUtils::object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size) {
  Copy::aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size));
}

inline void ZUtils::object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size) {
  if (from != to) {
    Copy::aligned_conjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size));
  }
}

#endif // SHARE_GC_Z_ZUTILS_INLINE_HPP
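
object_copy_disjoint maps to a memcpy-like word copy and object_copy_conjoint to a memmove-like one (with a self-copy short-circuit). A standalone illustration of why overlapping copies need the conjoint variant, using the standard-library equivalents (illustrative, not the HotSpot Copy:: implementation):

#include <cassert>
#include <cstring>

int main() {
  char buf[8] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};

  // Overlapping copy: shift "cdefgh" two slots towards the front.
  // memmove (conjoint) handles the overlap; memcpy (disjoint) is not
  // guaranteed to.
  std::memmove(buf, buf + 2, 6);
  assert(std::memcmp(buf, "cdefgh", 6) == 0);
  return 0;
}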

@ -59,6 +59,9 @@
          "Time between statistics print outs (in seconds)")                \
          range(1, (uint)-1)                                                \
                                                                            \
  product(bool, ZStressRelocateInPlace, false, DIAGNOSTIC,                  \
          "Always relocate pages in-place")                                 \
                                                                            \
  product(bool, ZVerifyViews, false, DIAGNOSTIC,                            \
          "Verify heap view accesses")                                      \
                                                                            \

@ -1010,14 +1010,12 @@
    <Field type="ulong" contentType="bytes" name="committed" label="Committed" />
    <Field type="uint" name="segments" label="Segments" />
    <Field type="boolean" name="nonBlocking" label="Non-blocking" />
    <Field type="boolean" name="noReserve" label="No Reserve" />
  </Event>

  <Event name="ZRelocationSet" category="Java Virtual Machine, GC, Detailed" label="ZGC Relocation Set" thread="true">
    <Field type="ulong" contentType="bytes" name="total" label="Total" />
    <Field type="ulong" contentType="bytes" name="empty" label="Empty" />
    <Field type="ulong" contentType="bytes" name="compactingFrom" label="Compacting From" />
    <Field type="ulong" contentType="bytes" name="compactingTo" label="Compacting To" />
    <Field type="ulong" contentType="bytes" name="relocate" label="Relocate" />
  </Event>

  <Event name="ZRelocationSetGroup" category="Java Virtual Machine, GC, Detailed" label="ZGC Relocation Set Group" thread="true">
@ -1025,8 +1023,7 @@
    <Field type="ulong" name="pages" label="Pages" />
    <Field type="ulong" contentType="bytes" name="total" label="Total" />
    <Field type="ulong" contentType="bytes" name="empty" label="Empty" />
    <Field type="ulong" contentType="bytes" name="compactingFrom" label="Compacting From" />
    <Field type="ulong" contentType="bytes" name="compactingTo" label="Compacting To" />
    <Field type="ulong" contentType="bytes" name="relocate" label="Relocate" />
  </Event>

  <Event name="ZStatisticsCounter" category="Java Virtual Machine, GC, Detailed" label="ZGC Statistics Counter" thread="true" experimental="true">

@ -67,7 +67,9 @@ public:
  for (size_t i = 0; i < entries_to_check; i++) {
    uintptr_t from_index = SequenceToFromIndex::one_to_one(i);

    EXPECT_FALSE(forwarding->find(from_index).populated()) << CAPTURE2(from_index, size);
    ZForwardingCursor cursor;
    ZForwardingEntry entry = forwarding->find(from_index, &cursor);
    EXPECT_FALSE(entry.populated()) << CAPTURE2(from_index, size);
  }
}

@ -90,7 +92,8 @@ public:
  for (size_t i = 0; i < entries_to_populate; i++) {
    uintptr_t from_index = SequenceToFromIndex::one_to_one(i);

    ZForwardingEntry entry = forwarding->find(from_index);
    ZForwardingCursor cursor;
    ZForwardingEntry entry = forwarding->find(from_index, &cursor);
    ASSERT_TRUE(entry.populated()) << CAPTURE2(from_index, size);

    ASSERT_EQ(entry.from_index(), from_index) << CAPTURE(size);
@ -132,7 +135,8 @@ public:
  for (size_t i = 0; i < entries_to_populate; i++) {
    uintptr_t from_index = SequenceToFromIndex::odd(i);

    ZForwardingEntry entry = forwarding->find(from_index);
    ZForwardingCursor cursor;
    ZForwardingEntry entry = forwarding->find(from_index, &cursor);

    ASSERT_FALSE(entry.populated()) << CAPTURE2(from_index, size);
  }

74 test/hotspot/jtreg/gc/z/TestRelocateInPlace.java Normal file
@ -0,0 +1,74 @@
/*
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package gc.z;

/*
 * @test TestRelocateInPlace
 * @requires vm.gc.Z
 * @summary Test ZGC in-place relocation
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xmx256M -XX:+UnlockDiagnosticVMOptions -XX:+ZStressRelocateInPlace gc.z.TestRelocateInPlace
 */

import java.util.ArrayList;

public class TestRelocateInPlace {
    private static final int allocSize = 100 * 1024 * 1024; // 100M
    private static final int smallObjectSize = 4 * 1024; // 4K
    private static final int mediumObjectSize = 2 * 1024 * 1024; // 2M

    private static volatile ArrayList<byte[]> keepAlive;

    private static void allocate(int objectSize) {
        keepAlive = new ArrayList<>();
        for (int i = 0; i < allocSize; i += objectSize) {
            keepAlive.add(new byte[objectSize]);
        }
    }

    private static void fragment() {
        // Release every other reference to cause lots of fragmentation
        for (int i = 0; i < keepAlive.size(); i += 2) {
            keepAlive.set(i, null);
        }
    }

    private static void test(int objectSize) throws Exception {
        System.out.println("Allocating");
        allocate(objectSize);

        System.out.println("Fragmenting");
        fragment();

        System.out.println("Reclaiming");
        System.gc();
    }

    public static void main(String[] args) throws Exception {
        for (int i = 0; i < 10; i++) {
            System.out.println("Iteration " + i);
            test(smallObjectSize);
            test(mediumObjectSize);
        }
    }
}