8335902: Parallel: Refactor VM_ParallelGCFailedAllocation and VM_ParallelGCSystemGC
Reviewed-by: gli, zgu
commit 34d8562a91
parent 2fc7eb44a0
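
In outline, this change:

- renames VM_ParallelGCFailedAllocation to VM_ParallelCollectForAllocation (now carrying an is_tlab flag) and VM_ParallelGCSystemGC to VM_ParallelGCCollect;
- splits mutator allocation into thin mem_allocate()/allocate_new_tlab() entry points over a shared mem_allocate_work() slow path;
- replaces failed_mem_allocate() and its five-level fallback with satisfy_failed_allocation() plus expand_heap_and_allocate();
- merges PSScavenge::invoke()/invoke_no_policy() into a single invoke(bool clear_soft_refs), moving the upgrade-to-full-gc policy into ParallelScavengeHeap::collect_at_safepoint();
- drops the "death march" heuristic and the scavengeSkipped / fullFollowsScavenge perf counters (including their aliasmap entries), and updates the JFR TestVMOperation test for the rename.

The resulting control flow, condensed from the hunks below:

  mem_allocate(size)                    // mutator entry, is_tlab = false
  allocate_new_tlab(...)                // mutator entry, is_tlab = true
        -> mem_allocate_work(size, is_tlab, ...)
             // loops; on failure issues VM_ParallelCollectForAllocation
        -> satisfy_failed_allocation(size, is_tlab)  // VM thread, at safepoint
             -> collect_at_safepoint(full)           // young gc, upgraded to full gc on failure
             -> expand_heap_and_allocate(size, is_tlab)
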
@@ -263,13 +263,20 @@ bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
 // and the rest will not be executed. For that reason, this method loops
 // during failed allocation attempts. If the java heap becomes exhausted,
 // we rely on the size_policy object to force a bail out.
-HeapWord* ParallelScavengeHeap::mem_allocate(
-                     size_t size,
-                     bool* gc_overhead_limit_was_exceeded) {
+HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
+                                             bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
+  bool is_tlab = false;
+  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
+                                                  bool is_tlab,
+                                                  bool* gc_overhead_limit_was_exceeded) {
+
   // In general gc_overhead_limit_was_exceeded should be false so
   // set it so here and reset it to true only if the gc time
   // limit is being exceeded as checked below.
@@ -303,9 +310,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   }
 
   // If certain conditions hold, try allocating from the old gen.
-  result = mem_allocate_old_gen(size);
-  if (result != nullptr) {
-    return result;
+  if (!is_tlab) {
+    result = mem_allocate_old_gen(size);
+    if (result != nullptr) {
+      return result;
+    }
   }
 
   if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
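
Note on the hunk above: the new !is_tlab guard keeps TLAB requests out of the old gen, so only ordinary allocations may take the old-gen fallback here. That matches the pre-change allocate_new_tlab() (see the unsafe_max_tlab_alloc hunk further down), which only ever allocated TLABs from the young gen.
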
@@ -338,7 +347,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
 
     if (result == nullptr) {
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, gc_count);
+      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -395,23 +404,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   return result;
 }
 
-// A "death march" is a series of ultra-slow allocations in which a full gc is
-// done before each allocation, and after the full gc the allocation still
-// cannot be satisfied from the young gen. This routine detects that condition;
-// it should be called after a full gc has been done and the allocation
-// attempted from the young gen. The parameter 'addr' should be the result of
-// that young gen allocation attempt.
-void
-ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
-  if (addr != nullptr) {
-    _death_march_count = 0; // death march has ended
-  } else if (_death_march_count == 0) {
-    if (should_alloc_in_eden(size)) {
-      _death_march_count = 1; // death march has started
-    }
-  }
-}
-
 HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
   HeapWord* res = old_gen()->allocate(size);
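
For readers unfamiliar with the heuristic being deleted here (its other half goes away in the next hunk), a standalone model of the old behavior — not HotSpot code, just a transcription of the removed logic into a self-contained sketch:

  // Standalone model of the deleted "death march" logic: once young-gen
  // allocation kept failing after full gcs, up to 63 consecutive requests
  // could be served straight from the old gen before the counter reset and
  // normal collection resumed.
  struct DeathMarchModel {
    unsigned count = 0;

    // Called after a full gc and a subsequent young-gen allocation attempt;
    // young_result_ok is that attempt's outcome.
    void check(bool young_result_ok, bool fits_in_eden) {
      if (young_result_ok) {
        count = 0;                          // death march has ended
      } else if (count == 0 && fits_in_eden) {
        count = 1;                          // death march has started
      }
    }

    // Mirrors the branch removed from mem_allocate_old_gen() in the next hunk.
    bool allow_old_gen_allocation() {
      if (count > 0) {
        if (count < 64) {
          ++count;
          return true;
        }
        count = 0;
      }
      return false;
    }
  };
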
@@ -427,74 +419,76 @@ HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
     return allocate_old_gen_and_record(size);
   }
 
-  // If a "death march" is in progress, allocate from the old gen a limited
-  // number of times before doing a GC.
-  if (_death_march_count > 0) {
-    if (_death_march_count < 64) {
-      ++_death_march_count;
-      return allocate_old_gen_and_record(size);
-    } else {
-      _death_march_count = 0;
-    }
-  }
   return nullptr;
 }
 
 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
+  if (GCLocker::check_active_before_gc()) {
+    return;
+  }
   PSParallelCompact::invoke(clear_all_soft_refs);
 }
 
-// Failed allocation policy. Must be called from the VM thread, and
-// only at a safepoint! Note that this method has policy for allocation
-// flow, and NOT collection policy. So we do not check for gc collection
-// time over limit here, that is the responsibility of the heap specific
-// collection methods. This method decides where to attempt allocations,
-// and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!is_stw_gc_active(), "not reentrant");
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-
-  // We assume that allocation in eden will fail unless we collect.
-
-  // First level allocation failure, scavenge and allocate in young gen.
-  GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  const bool invoked_full_gc = PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size);
-
-  // Second level allocation failure.
-  //   Mark sweep and allocate in young generation.
-  if (result == nullptr && !invoked_full_gc) {
-    do_full_collection(false);
-    result = young_gen()->allocate(size);
-  }
-
-  death_march_check(result, size);
-
-  // Third level allocation failure.
-  //   After mark sweep and young generation allocation failure,
-  //   allocate in old generation.
-  if (result == nullptr) {
-    result = allocate_old_gen_and_record(size);
-  }
-
-  // Fourth level allocation failure. We're running out of memory.
-  //   More complete mark sweep and allocate in young generation.
-  if (result == nullptr) {
-    do_full_collection(true);
-    result = young_gen()->allocate(size);
-  }
-
-  // Fifth level allocation failure.
-  //   After more complete mark sweep, allocate in old generation.
-  if (result == nullptr) {
-    result = allocate_old_gen_and_record(size);
-  }
-
-  return result;
+HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
+  HeapWord* result = nullptr;
+
+  result = young_gen()->allocate(size);
+  if (result == nullptr && !is_tlab) {
+    // auto expand inside
+    result = old_gen()->allocate(size);
+  }
+  return result; // Could be null if we are out of space.
+}
+
+HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
+  assert(size != 0, "precondition");
+
+  HeapWord* result = nullptr;
+
+  GCLocker::check_active_before_gc();
+  if (GCLocker::is_active_and_needs_gc()) {
+    return expand_heap_and_allocate(size, is_tlab);
+  }
+
+  // If young-gen can handle this allocation, attempt young-gc firstly.
+  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
+  collect_at_safepoint(!should_run_young_gc);
+
+  result = expand_heap_and_allocate(size, is_tlab);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // If we reach this point, we're really out of memory. Try every trick
+  // we can to reclaim memory. Force collection of soft references. Force
+  // a complete compaction of the heap. Any additional methods for finding
+  // free memory should be here, especially if they are expensive. If this
+  // attempt fails, an OOM exception will be thrown.
+  {
+    // Make sure the heap is fully compacted
+    uintx old_interval = HeapMaximumCompactionInterval;
+    HeapMaximumCompactionInterval = 0;
+
+    const bool clear_all_soft_refs = true;
+    PSParallelCompact::invoke(clear_all_soft_refs);
+
+    // Restore
+    HeapMaximumCompactionInterval = old_interval;
+  }
+
+  result = expand_heap_and_allocate(size, is_tlab);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // What else? We might try synchronous finalization later. If the total
+  // space available is large enough for the allocation, then a more
+  // complete compaction phase than we've tried so far might be
+  // appropriate.
+  return nullptr;
 }
 
 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
   CollectedHeap::ensure_parsability(retire_tlabs);
   young_gen()->eden_space()->ensure_parsability();
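
Taken together, the new safepoint slow path has a simple shape; a condensed outline of the function added above:

  // Condensed outline of satisfy_failed_allocation():
  //  1. GCLocker held?  Skip collecting; just try the young gen (and, for
  //     non-TLAB requests, the old gen) via expand_heap_and_allocate().
  //  2. Otherwise run collect_at_safepoint(): a young gc when the request is
  //     a TLAB or could fit in eden, else a full gc; then retry the allocation.
  //  3. Last resort: a maximally compacting full gc (HeapMaximumCompactionInterval
  //     temporarily forced to 0) that clears all soft references, then one
  //     final retry. A nullptr result here leads to an OOM exception.
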
@@ -513,7 +507,10 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
-  HeapWord* result = young_gen()->allocate(requested_size);
+  bool dummy;
+  HeapWord* result = mem_allocate_work(requested_size /* size */,
+                                       true /* is_tlab */,
+                                       &dummy);
   if (result != nullptr) {
     *actual_size = requested_size;
   }
@@ -533,7 +530,6 @@ void ParallelScavengeHeap::prune_unlinked_nmethods() {
   ScavengableNMethods::prune_unlinked_nmethods();
 }
 
-// This method is used by System.gc() and JVMTI.
 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   assert(!Heap_lock->owned_by_self(),
     "this thread should not own the Heap_lock");
@@ -552,10 +548,10 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   }
 
   while (true) {
-    VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
+    VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
     VMThread::execute(&op);
 
-    if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) {
+    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }
 
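
Behavioral note on the hunk above: the System.gc() loop no longer short-circuits on a success flag, because VM_ParallelGCCollect (see the psVMOperations hunks below) no longer reports one. Condensed:

  // Before: return if the cause is not an explicit full gc, or if the op
  //         reported full_gc_succeeded(); otherwise loop.
  // After:  return only for non-explicit causes; explicit full-gc requests
  //         always fall through to the rest of the loop body, which
  //         presumably re-checks the collection counters (not shown here).
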
@@ -573,6 +569,33 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   }
 }
 
+void ParallelScavengeHeap::try_collect_at_safepoint(bool full) {
+  assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+  if (GCLocker::check_active_before_gc()) {
+    return;
+  }
+  collect_at_safepoint(full);
+}
+
+bool ParallelScavengeHeap::must_clear_all_soft_refs() {
+  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
+         _gc_cause == GCCause::_wb_full_gc;
+}
+
+void ParallelScavengeHeap::collect_at_safepoint(bool full) {
+  assert(!GCLocker::is_active(), "precondition");
+  bool clear_soft_refs = must_clear_all_soft_refs();
+
+  if (!full) {
+    bool success = PSScavenge::invoke(clear_soft_refs);
+    if (success) {
+      return;
+    }
+    // Upgrade to Full-GC if young-gc fails
+  }
+  PSParallelCompact::invoke(clear_soft_refs);
+}
+
 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
   young_gen()->object_iterate(cl);
   old_gen()->object_iterate(cl);
@@ -75,8 +75,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   static PSAdaptiveSizePolicy* _size_policy;
   static PSGCAdaptivePolicyCounters* _gc_policy_counters;
 
-  unsigned int _death_march_count;
-
   GCMemoryManager* _young_manager;
   GCMemoryManager* _old_manager;
 
@@ -96,17 +94,27 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   void update_parallel_worker_threads_cpu_time();
 
-protected:
+  void collect_at_safepoint(bool full);
+
+  bool must_clear_all_soft_refs();
+
   HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
 
   inline bool should_alloc_in_eden(size_t size) const;
-  inline void death_march_check(HeapWord* const result, size_t size);
   HeapWord* mem_allocate_old_gen(size_t size);
 
-public:
+  HeapWord* mem_allocate_work(size_t size,
+                              bool is_tlab,
+                              bool* gc_overhead_limit_was_exceeded);
+
+  HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
+
+  void do_full_collection(bool clear_all_soft_refs) override;
+
+public:
   ParallelScavengeHeap() :
     CollectedHeap(),
-    _death_march_count(0),
     _young_manager(nullptr),
     _old_manager(nullptr),
     _eden_pool(nullptr),
@@ -184,25 +192,12 @@ class ParallelScavengeHeap : public CollectedHeap {
   // "gc_time_limit_was_exceeded" has an undefined meaning.
   HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
 
-  // Allocation attempt(s) during a safepoint. It should never be called
-  // to allocate a new TLAB as this allocation might be satisfied out
-  // of the old generation.
-  HeapWord* failed_mem_allocate(size_t size);
+  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
 
   // Support for System.gc()
   void collect(GCCause::Cause cause) override;
 
-  // These also should be called by the vm thread at a safepoint (e.g., from a
-  // VM operation).
-  //
-  // The first collects the young generation only, unless the scavenge fails; it
-  // will then attempt a full gc. The second collects the entire heap; if
-  // maximum_compaction is true, it will compact everything and clear all soft
-  // references.
-  inline bool invoke_scavenge();
-
-  // Perform a full collection
-  void do_full_collection(bool clear_all_soft_refs) override;
+  void try_collect_at_safepoint(bool full);
 
   void ensure_parsability(bool retire_tlabs) override;
   void resize_all_tlabs() override;
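
Net effect on the ParallelScavengeHeap interface, condensed from the header hunks above:

  // removed:  failed_mem_allocate(size), invoke_scavenge(),
  //           death_march_check(...), _death_march_count
  // added:    mem_allocate_work(size, is_tlab, ...),
  //           expand_heap_and_allocate(size, is_tlab),
  //           satisfy_failed_allocation(size, is_tlab),
  //           collect_at_safepoint(full), try_collect_at_safepoint(full),
  //           must_clear_all_soft_refs()
  // moved:    do_full_collection() out of the public section; only
  //           satisfy_failed_allocation() and try_collect_at_safepoint()
  //           remain public entry points for the VM operations.
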
@@ -34,10 +34,6 @@ inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
   return size < eden_size / 2;
 }
 
-inline bool ParallelScavengeHeap::invoke_scavenge() {
-  return PSScavenge::invoke();
-}
-
 inline bool ParallelScavengeHeap::is_in_young(const void* p) const {
   // Assumes the old gen address range is lower than that of the young gen.
   bool result = p >= young_gen()->reserved().start();
@@ -128,14 +128,6 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
     _major_pause_young_slope = PerfDataManager::create_variable(SUN_GC, cname,
       PerfData::U_None, (jlong) 0, CHECK);
 
-    cname = PerfDataManager::counter_name(name_space(), "scavengeSkipped");
-    _scavenge_skipped = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) 0, CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "fullFollowsScavenge");
-    _full_follows_scavenge = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) 0, CHECK);
-
     _counter_time_stamp.update();
   }
 
@@ -61,9 +61,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
   PerfVariable* _minor_pause_old_slope;
   PerfVariable* _major_pause_young_slope;
 
-  PerfVariable* _scavenge_skipped;
-  PerfVariable* _full_follows_scavenge;
-
   // Use this time stamp if the gc time stamp is not available.
   TimeStamp _counter_time_stamp;
 
@@ -180,14 +177,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
       (jlong)(ps_size_policy()->live_at_last_full_gc()));
   }
 
-  inline void update_scavenge_skipped(int cause) {
-    _scavenge_skipped->set_value(cause);
-  }
-
-  inline void update_full_follows_scavenge(int event) {
-    _full_follows_scavenge->set_value(event);
-  }
-
   // Update all the counters that can be updated from the size policy.
   // This should be called after all policy changes have been made
   // and reflected internally in the size policy.
@@ -51,6 +51,7 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorage.inline.hpp"
 #include "gc/shared/oopStorageSet.inline.hpp"
@@ -968,6 +969,7 @@ bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
 
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   IsSTWGCActiveMark mark;
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -42,6 +42,7 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorage.inline.hpp"
 #include "gc/shared/oopStorageSetParState.inline.hpp"
@@ -221,42 +222,6 @@ public:
   }
 };
 
-// This method contains all heap specific policy for invoking scavenge.
-// PSScavenge::invoke_no_policy() will do nothing but attempt to
-// scavenge. It will not clean up after failed promotions, bail out if
-// we've exceeded policy time limits, or any other special behavior.
-// All such policy should be placed here.
-//
-// Note that this method should only be called from the vm_thread while
-// at a safepoint!
-bool PSScavenge::invoke() {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!ParallelScavengeHeap::heap()->is_stw_gc_active(), "not reentrant");
-
-  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
-  IsSTWGCActiveMark mark;
-
-  const bool scavenge_done = PSScavenge::invoke_no_policy();
-  const bool need_full_gc = !scavenge_done;
-  bool full_gc_done = false;
-
-  if (UsePerfData) {
-    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
-    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
-    counters->update_full_follows_scavenge(ffs_val);
-  }
-
-  if (need_full_gc) {
-    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
-    const bool clear_all_softrefs = heap->soft_ref_policy()->should_clear_all_soft_refs();
-
-    full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
-  }
-
-  return full_gc_done;
-}
-
 class PSThreadRootsTaskClosure : public ThreadClosure {
   uint _worker_id;
 public:
@@ -288,14 +253,14 @@ class ScavengeRootsTask : public WorkerTask {
 public:
   ScavengeRootsTask(PSOldGen* old_gen,
                     uint active_workers) :
       WorkerTask("ScavengeRootsTask"),
       _strong_roots_scope(active_workers),
       _subtasks(ParallelRootType::sentinel),
       _old_gen(old_gen),
       _gen_top(old_gen->object_space()->top()),
       _active_workers(active_workers),
       _is_old_gen_empty(old_gen->object_space()->is_empty()),
       _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
     if (!_is_old_gen_empty) {
       PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
       card_table->pre_scavenge(active_workers);
@@ -353,26 +318,23 @@ public:
   }
 };
 
-// This method contains no policy. You should probably
-// be calling invoke() instead.
-bool PSScavenge::invoke_no_policy() {
+bool PSScavenge::invoke(bool clear_soft_refs) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 
-  _gc_timer.register_gc_start();
-
-  if (GCLocker::check_active_before_gc()) {
-    return false;
-  }
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-
   // Check for potential problems.
   if (!should_attempt_scavenge()) {
     return false;
   }
 
+  IsSTWGCActiveMark mark;
+
+  _gc_timer.register_gc_start();
+
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  GCCause::Cause gc_cause = heap->gc_cause();
+
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   GCIdMark gc_id_mark;
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 
@@ -425,7 +387,7 @@ bool PSScavenge::invoke_no_policy() {
   DerivedPointerTable::clear();
 #endif
 
-  reference_processor()->start_discovery(false /* always_clear */);
+  reference_processor()->start_discovery(clear_soft_refs);
 
   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
 
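
Policy migration, condensed from the two hunks above:

  // The old invoke() wrapper (deleted further up) decided whether a failed
  // scavenge should be followed by a full gc and set the fullFollowsScavenge
  // counter. The new invoke(bool clear_soft_refs) only runs the scavenge
  // itself: the upgrade decision now lives in
  // ParallelScavengeHeap::collect_at_safepoint(), and the caller's
  // soft-reference policy is threaded straight into reference discovery via
  // reference_processor()->start_discovery(clear_soft_refs).
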
@@ -537,14 +499,13 @@ bool PSScavenge::invoke_no_policy() {
         size_t survivor_limit =
           size_policy->max_survivor_size(max_young_size);
         _tenuring_threshold =
-          size_policy->compute_survivor_space_size_and_threshold(
-                                                           _survivor_overflow,
-                                                           _tenuring_threshold,
-                                                           survivor_limit);
+          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
+                                                                 _tenuring_threshold,
+                                                                 survivor_limit);
 
        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);
 
        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
@@ -568,8 +529,8 @@ bool PSScavenge::invoke_no_policy() {
       size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
       size_t max_old_gen_size = old_gen->max_gen_size();
       size_t max_eden_size = max_young_size -
        young_gen->from_space()->capacity_in_bytes() -
        young_gen->to_space()->capacity_in_bytes();
 
       // Used for diagnostics
       size_policy->clear_generation_free_space_flags();
@@ -599,7 +560,7 @@ bool PSScavenge::invoke_no_policy() {
       // a full collection. Don't resize the old gen here.
 
       heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                              size_policy->calculated_survivor_size_in_bytes());
 
       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
     }
@@ -657,20 +618,12 @@ void PSScavenge::clean_up_failed_promotion() {
 
 bool PSScavenge::should_attempt_scavenge() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-
-  if (UsePerfData) {
-    counters->update_scavenge_skipped(not_skipped);
-  }
-
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
-  // Do not attempt to promote unless to_space is empty
   if (!young_gen->to_space()->is_empty()) {
-    if (UsePerfData) {
-      counters->update_scavenge_skipped(to_space_not_empty);
-    }
+    // To-space is not empty; should run full-gc instead.
     return false;
   }
 
@@ -687,15 +640,7 @@ bool PSScavenge::should_attempt_scavenge() {
                 result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                 (size_t) policy->padded_average_promoted_in_bytes(),
                 free_in_old_gen);
-  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
-    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
-  }
 
-  if (!result) {
-    if (UsePerfData) {
-      counters->update_scavenge_skipped(promoted_too_large);
-    }
-  }
   return result;
 }
 
@@ -45,13 +45,6 @@ class PSScavenge: AllStatic {
   friend class PSKeepAliveClosure;
   friend class PSPromotionManager;
 
-  enum ScavengeSkippedCause {
-    not_skipped = 0,
-    to_space_not_empty,
-    promoted_too_large,
-    full_follows_scavenge
-  };
-
 protected:
   // Flags/counters
   static SpanSubjectToDiscoveryClosure _span_based_discoverer;
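
Companion cleanup: with the old invoke() wrapper gone, nothing sets these diagnostics any more, so the whole chain goes away in this commit:

  // ScavengeSkippedCause (this hunk) ->
  // _scavenge_skipped / _full_follows_scavenge PerfVariables and their
  // update_* helpers (psGCAdaptivePolicyCounters hunks above) ->
  // sun.gc.policy.scavengeSkipped / fullFollowsScavenge aliasmap entries
  // (hunks below).
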
@@ -105,10 +98,9 @@ class PSScavenge: AllStatic {
   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();
 
-  // Scavenge entry point. This may invoke a full gc; return true if so.
-  static bool invoke();
-  // Return true if a collection was done; false otherwise.
-  static bool invoke_no_policy();
+  // Scavenge entry point.
+  // Return true iff a young-gc is completed without promotion-failure.
+  static bool invoke(bool clear_soft_refs);
 
   template <class T> static inline bool should_scavenge(T* p);
 
@@ -31,19 +31,19 @@
 #include "utilities/dtrace.hpp"
 
 // The following methods are used by the parallel scavenge collector
-VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
-                                                             uint gc_count) :
-  VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+VM_ParallelCollectForAllocation::VM_ParallelCollectForAllocation(size_t word_size,
+                                                                 bool is_tlab,
+                                                                 uint gc_count) :
+  VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure),
+  _is_tlab(is_tlab) {
   assert(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
-void VM_ParallelGCFailedAllocation::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::MINOR);
-
+void VM_ParallelCollectForAllocation::doit() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_word_size);
+  _result = heap->satisfy_failed_allocation(_word_size, _is_tlab);
 
   if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
@@ -56,24 +56,14 @@ static bool is_cause_full(GCCause::Cause cause) {
 }
 
 // Only used for System.gc() calls
-VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+VM_ParallelGCCollect::VM_ParallelGCCollect(uint gc_count,
                                            uint full_gc_count,
                                            GCCause::Cause gc_cause) :
-  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)),
-  _full_gc_succeeded(false)
-{
-}
+  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)) {}
 
-void VM_ParallelGCSystemGC::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::FULL);
-
+void VM_ParallelGCCollect::doit() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (!_full) {
-    // If (and only if) the scavenge fails, this will invoke a full gc.
-    _full_gc_succeeded = heap->invoke_scavenge();
-  } else {
-    _full_gc_succeeded = PSParallelCompact::invoke(false);
-  }
+  heap->try_collect_at_safepoint(_full);
 }
@@ -29,23 +29,22 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 
-class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
- public:
-  VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
+class VM_ParallelCollectForAllocation : public VM_CollectForAllocation {
+  bool _is_tlab;
+ public:
+  VM_ParallelCollectForAllocation(size_t word_size, bool is_tlab, uint gc_count);
 
   virtual VMOp_Type type() const {
-    return VMOp_ParallelGCFailedAllocation;
+    return VMOp_ParallelCollectForAllocation;
   }
   virtual void doit();
 };
 
-class VM_ParallelGCSystemGC: public VM_GC_Operation {
-  bool _full_gc_succeeded;
+class VM_ParallelGCCollect: public VM_GC_Operation {
  public:
-  VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
-  virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
+  VM_ParallelGCCollect(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
+  virtual VMOp_Type type() const { return VMOp_ParallelGCCollect; }
   virtual void doit();
-  bool full_gc_succeeded() const { return _full_gc_succeeded; }
 };
 
 #endif // SHARE_GC_PARALLEL_PSVMOPERATIONS_HPP
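
For orientation, the renamed allocation operation is driven from mem_allocate_work() as shown in an earlier hunk. A sketch of the calling pattern; the result-handling lines are not part of this diff and are assumed to follow the usual VM_CollectForAllocation shape, where result() and prologue_succeeded() come from the shared base classes:

  // Sketch only; mem_allocate_work() in the first file is the real caller.
  VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
  VMThread::execute(&op);
  if (op.prologue_succeeded()) {
    return op.result();   // may be nullptr; the caller loops and retries
  }
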
@@ -42,10 +42,10 @@
 //      VM_GC_HeapInspection
 //      VM_PopulateDynamicDumpSharedSpace
 //      VM_SerialGCCollect
-//      VM_ParallelGCSystemGC
+//      VM_ParallelGCCollect
 //    VM_CollectForAllocation
 //      VM_SerialCollectForAllocation
-//      VM_ParallelGCFailedAllocation
+//      VM_ParallelCollectForAllocation
 //  VM_Verify
 //  VM_PopulateDumpSharedSpace
 //
@@ -64,13 +64,13 @@
 //
 //  VM_CollectForAllocation
 //  VM_SerialCollectForAllocation
-//  VM_ParallelGCFailedAllocation
+//  VM_ParallelCollectForAllocation
 //   - this operation is invoked when allocation is failed;
 //     operation performs garbage collection and tries to
 //     allocate afterwards;
 //
 //  VM_SerialGCCollect
-//  VM_ParallelGCSystemGC
+//  VM_ParallelGCCollect
 //   - these operations perform full collection of heaps of
 //     different kind
 //
@@ -52,8 +52,8 @@
   template(GC_HeapInspection)                     \
   template(SerialCollectForAllocation)            \
   template(SerialGCCollect)                       \
-  template(ParallelGCFailedAllocation)            \
-  template(ParallelGCSystemGC)                    \
+  template(ParallelCollectForAllocation)          \
+  template(ParallelGCCollect)                     \
   template(G1CollectForAllocation)                \
   template(G1CollectFull)                         \
   template(G1PauseRemark)                         \
@@ -459,8 +459,6 @@ alias sun.gc.policy.edenSize // 1.5.0 b39
   hotspot.gc.policy.eden_size // 1.5.0 b21
 alias sun.gc.policy.freeSpace // 1.5.0 b39
   hotspot.gc.policy.free_space // 1.5.0 b21
-alias sun.gc.policy.fullFollowsScavenge // 1.5.0 b39
-  hotspot.gc.policy.full_follows_scavenge // 1.5.0 b21
 alias sun.gc.policy.gcTimeLimitExceeded // 1.5.0 b39
   hotspot.gc.policy.gc_time_limit_exceeded // 1.5.0 b21
 alias sun.gc.policy.generations // 1.5.0 b39
@@ -508,8 +506,6 @@ alias sun.gc.policy.promoSize // 1.5.0 b39
   hotspot.gc.policy.promo_size // 1.5.0 b21
 alias sun.gc.policy.promoted // 1.5.0 b39
   hotspot.gc.policy.promoted // 1.5.0 b21
-alias sun.gc.policy.scavengeSkipped // 1.5.0 b39
-  hotspot.gc.policy.scavenge_skipped // 1.5.0 b21
 alias sun.gc.policy.survived // 1.5.0 b39
   hotspot.gc.policy.survived // 1.5.0 b21
 alias sun.gc.policy.survivorOverflowed // 1.5.0 b39
@@ -43,7 +43,7 @@ import jdk.test.lib.jfr.Events;
 public class TestVMOperation {
 
     private static final String EVENT_NAME = EventNames.ExecuteVMOperation;
-    private static final String VM_OPERATION = "ParallelGCSystemGC";
+    private static final String VM_OPERATION = "ParallelGCCollect";
 
     public static void main(String[] args) throws Throwable {
         Recording recording = new Recording();