8335902: Parallel: Refactor VM_ParallelGCFailedAllocation and VM_ParallelGCSystemGC
Reviewed-by: gli, zgu
commit 34d8562a91 (parent 2fc7eb44a0)
@@ -263,13 +263,20 @@ bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
 // and the rest will not be executed. For that reason, this method loops
 // during failed allocation attempts. If the java heap becomes exhausted,
 // we rely on the size_policy object to force a bail out.
-HeapWord* ParallelScavengeHeap::mem_allocate(
-                                     size_t size,
-                                     bool* gc_overhead_limit_was_exceeded) {
+HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
+                                             bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
+  bool is_tlab = false;
+  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
+                                                  bool is_tlab,
+                                                  bool* gc_overhead_limit_was_exceeded) {
+
   // In general gc_overhead_limit_was_exceeded should be false so
   // set it so here and reset it to true only if the gc time
   // limit is being exceeded as checked below.
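The hunk above splits the old mem_allocate() body into a thin entry point plus a shared worker that both ordinary and TLAB allocation funnel through. A minimal standalone C++ sketch of that pattern (illustrative only; all names and stub behavior below are invented, this is not HotSpot code):

#include <cstddef>
#include <cstdio>

struct Heap {
  bool young_alloc(size_t sz) { return sz < 64; }   // stand-in for young-gen allocation
  bool old_alloc(size_t sz)   { return sz < 1024; } // stand-in for old-gen allocation

  // Single worker shared by both entry points.
  bool allocate_work(size_t sz, bool is_tlab) {
    if (young_alloc(sz)) {
      return true;
    }
    // TLABs must stay in the young gen; only ordinary allocations
    // may fall through to the old gen.
    if (!is_tlab && old_alloc(sz)) {
      return true;
    }
    return false; // caller would now schedule a GC VM operation
  }

  bool mem_allocate(size_t sz)      { return allocate_work(sz, /*is_tlab=*/false); }
  bool allocate_new_tlab(size_t sz) { return allocate_work(sz, /*is_tlab=*/true); }
};

int main() {
  Heap h;
  std::printf("plain 512: %d\n", h.mem_allocate(512));      // old-gen fallback allowed
  std::printf("tlab  512: %d\n", h.allocate_new_tlab(512)); // no old-gen fallback
}

The payoff of the funnel is visible in the later hunks: the is_tlab flag threads all the way into the GC VM operation, so TLAB refills get the same retry-with-GC policy as ordinary allocations.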
@@ -303,9 +310,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   }
 
   // If certain conditions hold, try allocating from the old gen.
-  result = mem_allocate_old_gen(size);
-  if (result != nullptr) {
-    return result;
+  if (!is_tlab) {
+    result = mem_allocate_old_gen(size);
+    if (result != nullptr) {
+      return result;
+    }
   }
 
   if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
@@ -338,7 +347,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
 
     if (result == nullptr) {
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, gc_count);
+      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -395,23 +404,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   return result;
 }
 
-// A "death march" is a series of ultra-slow allocations in which a full gc is
-// done before each allocation, and after the full gc the allocation still
-// cannot be satisfied from the young gen. This routine detects that condition;
-// it should be called after a full gc has been done and the allocation
-// attempted from the young gen. The parameter 'addr' should be the result of
-// that young gen allocation attempt.
-void
-ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
-  if (addr != nullptr) {
-    _death_march_count = 0; // death march has ended
-  } else if (_death_march_count == 0) {
-    if (should_alloc_in_eden(size)) {
-      _death_march_count = 1; // death march has started
-    }
-  }
-}
-
 HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
   HeapWord* res = old_gen()->allocate(size);
@@ -427,74 +419,76 @@ HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
     return allocate_old_gen_and_record(size);
   }
 
-  // If a "death march" is in progress, allocate from the old gen a limited
-  // number of times before doing a GC.
-  if (_death_march_count > 0) {
-    if (_death_march_count < 64) {
-      ++_death_march_count;
-      return allocate_old_gen_and_record(size);
-    } else {
-      _death_march_count = 0;
-    }
-  }
   return nullptr;
 }
 
 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
   if (GCLocker::check_active_before_gc()) {
     return;
   }
   PSParallelCompact::invoke(clear_all_soft_refs);
 }
 
-// Failed allocation policy. Must be called from the VM thread, and
-// only at a safepoint! Note that this method has policy for allocation
-// flow, and NOT collection policy. So we do not check for gc collection
-// time over limit here, that is the responsibility of the heap specific
-// collection methods. This method decides where to attempt allocations,
-// and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!is_stw_gc_active(), "not reentrant");
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-
-  // We assume that allocation in eden will fail unless we collect.
-
-  // First level allocation failure, scavenge and allocate in young gen.
-  GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  const bool invoked_full_gc = PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size);
-
-  // Second level allocation failure.
-  //   Mark sweep and allocate in young generation.
-  if (result == nullptr && !invoked_full_gc) {
-    do_full_collection(false);
-    result = young_gen()->allocate(size);
-  }
-
-  death_march_check(result, size);
-
-  // Third level allocation failure.
-  //   After mark sweep and young generation allocation failure,
-  //   allocate in old generation.
-  if (result == nullptr) {
-    result = allocate_old_gen_and_record(size);
-  }
-
-  // Fourth level allocation failure. We're running out of memory.
-  //   More complete mark sweep and allocate in young generation.
-  if (result == nullptr) {
-    do_full_collection(true);
-    result = young_gen()->allocate(size);
-  }
-
-  // Fifth level allocation failure.
-  //   After more complete mark sweep, allocate in old generation.
-  if (result == nullptr) {
-    result = allocate_old_gen_and_record(size);
-  }
-
-  return result;
-}
+HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
+  HeapWord* result = nullptr;
+
+  result = young_gen()->allocate(size);
+  if (result == nullptr && !is_tlab) {
+    // auto expand inside
+    result = old_gen()->allocate(size);
+  }
+
+  return result; // Could be null if we are out of space.
+}
 
+HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
+  assert(size != 0, "precondition");
+
+  HeapWord* result = nullptr;
+
+  GCLocker::check_active_before_gc();
+  if (GCLocker::is_active_and_needs_gc()) {
+    return expand_heap_and_allocate(size, is_tlab);
+  }
+
+  // If young-gen can handle this allocation, attempt young-gc firstly.
+  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
+  collect_at_safepoint(!should_run_young_gc);
+
+  result = expand_heap_and_allocate(size, is_tlab);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // If we reach this point, we're really out of memory. Try every trick
+  // we can to reclaim memory. Force collection of soft references. Force
+  // a complete compaction of the heap. Any additional methods for finding
+  // free memory should be here, especially if they are expensive. If this
+  // attempt fails, an OOM exception will be thrown.
+  {
+    // Make sure the heap is fully compacted
+    uintx old_interval = HeapMaximumCompactionInterval;
+    HeapMaximumCompactionInterval = 0;
+
+    const bool clear_all_soft_refs = true;
+    PSParallelCompact::invoke(clear_all_soft_refs);
+
+    // Restore
+    HeapMaximumCompactionInterval = old_interval;
+  }
+
+  result = expand_heap_and_allocate(size, is_tlab);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // What else? We might try synchronous finalization later. If the total
+  // space available is large enough for the allocation, then a more
+  // complete compaction phase than we've tried so far might be
+  // appropriate.
+  return nullptr;
+}
 
 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
   CollectedHeap::ensure_parsability(retire_tlabs);
   young_gen()->eden_space()->ensure_parsability();
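The five-level ladder of the old failed_mem_allocate() collapses into a two-step escalation in satisfy_failed_allocation(): collect, retry, then escalate once to a maximally compacting collection that also clears soft references. A standalone model of that flow (illustrative only; the helper names and stub behavior are invented, not HotSpot code):

#include <cstdio>

static bool try_allocate()        { return false; } // pretend the heap stays full
static void collect(bool full)    { std::printf("gc (full=%d)\n", full); }
static void collect_max_compact() { std::printf("full gc, clear soft refs, max compaction\n"); }

static bool satisfy_failed_allocation(bool fits_in_young) {
  // Prefer a young collection when the request could be satisfied from eden.
  collect(/*full=*/!fits_in_young);
  if (try_allocate()) {
    return true;
  }
  // Last resort before OOM: the most aggressive collection available.
  collect_max_compact();
  return try_allocate();
}

int main() {
  bool ok = satisfy_failed_allocation(/*fits_in_young=*/true);
  std::printf("allocation %s\n", ok ? "succeeded" : "failed -> OOM");
}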
@@ -513,7 +507,10 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
-  HeapWord* result = young_gen()->allocate(requested_size);
+  bool dummy;
+  HeapWord* result = mem_allocate_work(requested_size /* size */,
+                                       true /* is_tlab */,
+                                       &dummy);
   if (result != nullptr) {
     *actual_size = requested_size;
   }
@@ -533,7 +530,6 @@ void ParallelScavengeHeap::prune_unlinked_nmethods() {
   ScavengableNMethods::prune_unlinked_nmethods();
 }
 
-// This method is used by System.gc() and JVMTI.
 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   assert(!Heap_lock->owned_by_self(),
          "this thread should not own the Heap_lock");
@@ -552,10 +548,10 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   }
 
   while (true) {
-    VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
+    VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
     VMThread::execute(&op);
 
-    if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) {
+    if (!GCCause::is_explicit_full_gc(cause)) {
       return;
     }
 
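With full_gc_succeeded() gone from the VM operation, the retry loop in collect() decides from GC counters whether an explicit request was actually served. A rough standalone sketch of that caller-side pattern (hypothetical code under invented names, not the patch itself):

#include <cstdio>

static int full_gc_count = 0;

// Pretend VM operation: skipped when the request is stale, otherwise runs a full gc.
static bool execute_collect_op(int expected_full_count) {
  if (full_gc_count != expected_full_count) {
    return false; // another gc ran first; this request is considered stale
  }
  ++full_gc_count; // the full gc actually ran
  return true;
}

static void collect_explicit() {
  while (true) {
    int count = full_gc_count; // snapshot before submitting
    if (execute_collect_op(count)) {
      return;
    }
    // Re-read the counters and retry until our full gc really happened.
  }
}

int main() {
  collect_explicit();
  std::printf("full gcs: %d\n", full_gc_count);
}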
@@ -573,6 +569,33 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   }
 }
 
+void ParallelScavengeHeap::try_collect_at_safepoint(bool full) {
+  assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+  if (GCLocker::check_active_before_gc()) {
+    return;
+  }
+  collect_at_safepoint(full);
+}
+
+bool ParallelScavengeHeap::must_clear_all_soft_refs() {
+  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
+         _gc_cause == GCCause::_wb_full_gc;
+}
+
+void ParallelScavengeHeap::collect_at_safepoint(bool full) {
+  assert(!GCLocker::is_active(), "precondition");
+  bool clear_soft_refs = must_clear_all_soft_refs();
+
+  if (!full) {
+    bool success = PSScavenge::invoke(clear_soft_refs);
+    if (success) {
+      return;
+    }
+    // Upgrade to Full-GC if young-gc fails
+  }
+  PSParallelCompact::invoke(clear_soft_refs);
+}
+
 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
   young_gen()->object_iterate(cl);
   old_gen()->object_iterate(cl);
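collect_at_safepoint() is the new single home of the "upgrade to full GC" policy that used to live inside PSScavenge::invoke(). A minimal standalone model of it (illustrative only; the stub functions are invented, not HotSpot code):

#include <cstdio>

static bool young_gc_succeeds = false; // pretend the scavenge fails

static bool invoke_young_gc(bool clear_soft_refs) {
  std::printf("young gc (clear_soft_refs=%d)\n", clear_soft_refs);
  return young_gc_succeeds;
}

static void invoke_full_gc(bool clear_soft_refs) {
  std::printf("full gc (clear_soft_refs=%d)\n", clear_soft_refs);
}

static void collect_at_safepoint(bool full, bool clear_soft_refs) {
  if (!full) {
    if (invoke_young_gc(clear_soft_refs)) {
      return; // the young gc satisfied the request
    }
    // Upgrade to a full gc when the young gc fails.
  }
  invoke_full_gc(clear_soft_refs);
}

int main() {
  collect_at_safepoint(/*full=*/false, /*clear_soft_refs=*/false);
}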
@@ -75,8 +75,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   static PSAdaptiveSizePolicy* _size_policy;
   static PSGCAdaptivePolicyCounters* _gc_policy_counters;
 
-  unsigned int _death_march_count;
-
   GCMemoryManager* _young_manager;
   GCMemoryManager* _old_manager;
 
@@ -96,17 +94,27 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   void update_parallel_worker_threads_cpu_time();
 
+ protected:
+  void collect_at_safepoint(bool full);
+
+  bool must_clear_all_soft_refs();
+
   HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
 
   inline bool should_alloc_in_eden(size_t size) const;
-  inline void death_march_check(HeapWord* const result, size_t size);
 
   HeapWord* mem_allocate_old_gen(size_t size);
 
  public:
+  HeapWord* mem_allocate_work(size_t size,
+                              bool is_tlab,
+                              bool* gc_overhead_limit_was_exceeded);
+
+  HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
+
+  void do_full_collection(bool clear_all_soft_refs) override;
+
+ public:
   ParallelScavengeHeap() :
     CollectedHeap(),
-    _death_march_count(0),
     _young_manager(nullptr),
     _old_manager(nullptr),
     _eden_pool(nullptr),
@@ -184,25 +192,12 @@ class ParallelScavengeHeap : public CollectedHeap {
   // "gc_time_limit_was_exceeded" has an undefined meaning.
   HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
 
-  // Allocation attempt(s) during a safepoint. It should never be called
-  // to allocate a new TLAB as this allocation might be satisfied out
-  // of the old generation.
-  HeapWord* failed_mem_allocate(size_t size);
+  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
 
   // Support for System.gc()
   void collect(GCCause::Cause cause) override;
 
-  // These also should be called by the vm thread at a safepoint (e.g., from a
-  // VM operation).
-  //
-  // The first collects the young generation only, unless the scavenge fails; it
-  // will then attempt a full gc. The second collects the entire heap; if
-  // maximum_compaction is true, it will compact everything and clear all soft
-  // references.
-  inline bool invoke_scavenge();
-
-  // Perform a full collection
-  void do_full_collection(bool clear_all_soft_refs) override;
+  void try_collect_at_safepoint(bool full);
 
   void ensure_parsability(bool retire_tlabs) override;
   void resize_all_tlabs() override;
@@ -34,10 +34,6 @@ inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
   return size < eden_size / 2;
 }
 
-inline bool ParallelScavengeHeap::invoke_scavenge() {
-  return PSScavenge::invoke();
-}
-
 inline bool ParallelScavengeHeap::is_in_young(const void* p) const {
   // Assumes the old gen address range is lower than that of the young gen.
   bool result = p >= young_gen()->reserved().start();
@@ -128,14 +128,6 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
     _major_pause_young_slope = PerfDataManager::create_variable(SUN_GC, cname,
       PerfData::U_None, (jlong) 0, CHECK);
 
-    cname = PerfDataManager::counter_name(name_space(), "scavengeSkipped");
-    _scavenge_skipped = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) 0, CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "fullFollowsScavenge");
-    _full_follows_scavenge = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) 0, CHECK);
-
     _counter_time_stamp.update();
   }
 
@@ -61,9 +61,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
   PerfVariable* _minor_pause_old_slope;
   PerfVariable* _major_pause_young_slope;
 
-  PerfVariable* _scavenge_skipped;
-  PerfVariable* _full_follows_scavenge;
-
   // Use this time stamp if the gc time stamp is not available.
   TimeStamp _counter_time_stamp;
 
@@ -180,14 +177,6 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
       (jlong)(ps_size_policy()->live_at_last_full_gc()));
   }
 
-  inline void update_scavenge_skipped(int cause) {
-    _scavenge_skipped->set_value(cause);
-  }
-
-  inline void update_full_follows_scavenge(int event) {
-    _full_follows_scavenge->set_value(event);
-  }
-
   // Update all the counters that can be updated from the size policy.
   // This should be called after all policy changes have been made
   // and reflected internally in the size policy.
@@ -51,6 +51,7 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorage.inline.hpp"
 #include "gc/shared/oopStorageSet.inline.hpp"
@@ -968,6 +969,7 @@ bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
 
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   IsSTWGCActiveMark mark;
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -42,6 +42,7 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorage.inline.hpp"
 #include "gc/shared/oopStorageSetParState.inline.hpp"
@@ -221,42 +222,6 @@ public:
   }
 };
 
-// This method contains all heap specific policy for invoking scavenge.
-// PSScavenge::invoke_no_policy() will do nothing but attempt to
-// scavenge. It will not clean up after failed promotions, bail out if
-// we've exceeded policy time limits, or any other special behavior.
-// All such policy should be placed here.
-//
-// Note that this method should only be called from the vm_thread while
-// at a safepoint!
-bool PSScavenge::invoke() {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!ParallelScavengeHeap::heap()->is_stw_gc_active(), "not reentrant");
-
-  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
-  IsSTWGCActiveMark mark;
-
-  const bool scavenge_done = PSScavenge::invoke_no_policy();
-  const bool need_full_gc = !scavenge_done;
-  bool full_gc_done = false;
-
-  if (UsePerfData) {
-    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
-    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
-    counters->update_full_follows_scavenge(ffs_val);
-  }
-
-  if (need_full_gc) {
-    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
-    const bool clear_all_softrefs = heap->soft_ref_policy()->should_clear_all_soft_refs();
-
-    full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
-  }
-
-  return full_gc_done;
-}
-
 class PSThreadRootsTaskClosure : public ThreadClosure {
   uint _worker_id;
  public:
@@ -288,14 +253,14 @@ class ScavengeRootsTask : public WorkerTask {
 public:
   ScavengeRootsTask(PSOldGen* old_gen,
                     uint active_workers) :
-      WorkerTask("ScavengeRootsTask"),
-      _strong_roots_scope(active_workers),
-      _subtasks(ParallelRootType::sentinel),
-      _old_gen(old_gen),
-      _gen_top(old_gen->object_space()->top()),
-      _active_workers(active_workers),
-      _is_old_gen_empty(old_gen->object_space()->is_empty()),
-      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
+    WorkerTask("ScavengeRootsTask"),
+    _strong_roots_scope(active_workers),
+    _subtasks(ParallelRootType::sentinel),
+    _old_gen(old_gen),
+    _gen_top(old_gen->object_space()->top()),
+    _active_workers(active_workers),
+    _is_old_gen_empty(old_gen->object_space()->is_empty()),
+    _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
     if (!_is_old_gen_empty) {
       PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
       card_table->pre_scavenge(active_workers);
@@ -353,26 +318,23 @@ public:
   }
 };
 
-// This method contains no policy. You should probably
-// be calling invoke() instead.
-bool PSScavenge::invoke_no_policy() {
+bool PSScavenge::invoke(bool clear_soft_refs) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 
-  _gc_timer.register_gc_start();
-
-  if (GCLocker::check_active_before_gc()) {
-    return false;
-  }
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-
   // Check for potential problems.
   if (!should_attempt_scavenge()) {
     return false;
   }
 
+  IsSTWGCActiveMark mark;
+
+  _gc_timer.register_gc_start();
+
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  GCCause::Cause gc_cause = heap->gc_cause();
+
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   GCIdMark gc_id_mark;
   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@@ -425,7 +387,7 @@ bool PSScavenge::invoke_no_policy() {
   DerivedPointerTable::clear();
 #endif
 
-  reference_processor()->start_discovery(false /* always_clear */);
+  reference_processor()->start_discovery(clear_soft_refs);
 
   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
 
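The clear_soft_refs parameter now reaches reference discovery instead of the hard-coded false. A small standalone sketch of what that flag changes (hypothetical code under invented names, not the HotSpot ReferenceProcessor):

#include <cstdio>

struct ReferenceProcessor {
  bool always_clear = false;
  void start_discovery(bool clear_soft) {
    always_clear = clear_soft;
    std::printf("discovery started, soft refs %s\n",
                clear_soft ? "cleared eagerly" : "kept by policy");
  }
};

int main() {
  ReferenceProcessor rp;
  rp.start_discovery(false); // normal young gc
  rp.start_discovery(true);  // e.g. GCCause::_metadata_GC_clear_soft_refs
}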
@@ -537,14 +499,13 @@ bool PSScavenge::invoke_no_policy() {
       size_t survivor_limit =
         size_policy->max_survivor_size(max_young_size);
       _tenuring_threshold =
-        size_policy->compute_survivor_space_size_and_threshold(
-                                                           _survivor_overflow,
-                                                           _tenuring_threshold,
-                                                           survivor_limit);
+        size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
+                                                               _tenuring_threshold,
+                                                               survivor_limit);
 
-    log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
-                       size_policy->calculated_survivor_size_in_bytes(),
-                       _tenuring_threshold, MaxTenuringThreshold);
+      log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
+                         size_policy->calculated_survivor_size_in_bytes(),
+                         _tenuring_threshold, MaxTenuringThreshold);
 
       if (UsePerfData) {
         PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
@@ -568,8 +529,8 @@ bool PSScavenge::invoke_no_policy() {
       size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
       size_t max_old_gen_size = old_gen->max_gen_size();
       size_t max_eden_size = max_young_size -
-        young_gen->from_space()->capacity_in_bytes() -
-        young_gen->to_space()->capacity_in_bytes();
+                             young_gen->from_space()->capacity_in_bytes() -
+                             young_gen->to_space()->capacity_in_bytes();
 
       // Used for diagnostics
       size_policy->clear_generation_free_space_flags();
@@ -599,7 +560,7 @@ bool PSScavenge::invoke_no_policy() {
       // a full collection. Don't resize the old gen here.
 
       heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
-                        size_policy->calculated_survivor_size_in_bytes());
+                             size_policy->calculated_survivor_size_in_bytes());
 
       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
     }
@@ -657,20 +618,12 @@ void PSScavenge::clean_up_failed_promotion() {
 
 bool PSScavenge::should_attempt_scavenge() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-
-  if (UsePerfData) {
-    counters->update_scavenge_skipped(not_skipped);
-  }
 
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
   // Do not attempt to promote unless to_space is empty
   if (!young_gen->to_space()->is_empty()) {
-    if (UsePerfData) {
-      counters->update_scavenge_skipped(to_space_not_empty);
-    }
+    // To-space is not empty; should run full-gc instead.
     return false;
   }
 
@@ -687,15 +640,7 @@ bool PSScavenge::should_attempt_scavenge() {
                   result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                   (size_t) policy->padded_average_promoted_in_bytes(),
                   free_in_old_gen);
-  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
-    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
-  }
-
-  if (!result) {
-    if (UsePerfData) {
-      counters->update_scavenge_skipped(promoted_too_large);
-    }
-  }
+
   return result;
 }
 
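Stripped of its perf-counter bookkeeping, should_attempt_scavenge() reduces to a promotion-headroom test. A simplified standalone model of that check (illustrative only; the signature and values are invented, not HotSpot code):

#include <cstddef>
#include <cstdio>

static bool should_attempt_scavenge(size_t padded_avg_promoted,
                                    size_t free_in_old_gen) {
  // A scavenge is only worth attempting if the expected promotion volume
  // fits in the old gen's free space; otherwise promotion would likely fail
  // and a full gc is the better first move.
  return padded_avg_promoted < free_in_old_gen;
}

int main() {
  std::printf("%d\n", should_attempt_scavenge(8 << 20, 64 << 20));  // 1: do scavenge
  std::printf("%d\n", should_attempt_scavenge(96 << 20, 64 << 20)); // 0: full gc instead
}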
@@ -45,13 +45,6 @@ class PSScavenge: AllStatic {
   friend class PSKeepAliveClosure;
   friend class PSPromotionManager;
 
-  enum ScavengeSkippedCause {
-    not_skipped = 0,
-    to_space_not_empty,
-    promoted_too_large,
-    full_follows_scavenge
-  };
-
  protected:
   // Flags/counters
   static SpanSubjectToDiscoveryClosure _span_based_discoverer;
@@ -105,10 +98,9 @@ class PSScavenge: AllStatic {
   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();
 
-  // Scavenge entry point. This may invoke a full gc; return true if so.
-  static bool invoke();
-  // Return true if a collection was done; false otherwise.
-  static bool invoke_no_policy();
+  // Scavenge entry point.
+  // Return true iff a young-gc is completed without promotion-failure.
+  static bool invoke(bool clear_soft_refs);
 
   template <class T> static inline bool should_scavenge(T* p);
 
@@ -31,19 +31,19 @@
 #include "utilities/dtrace.hpp"
 
 // The following methods are used by the parallel scavenge collector
-VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
-                                                             uint gc_count) :
-    VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+VM_ParallelCollectForAllocation::VM_ParallelCollectForAllocation(size_t word_size,
+                                                                 bool is_tlab,
+                                                                 uint gc_count) :
+    VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure),
+    _is_tlab(is_tlab) {
   assert(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
-void VM_ParallelGCFailedAllocation::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::MINOR);
-
+void VM_ParallelCollectForAllocation::doit() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_word_size);
+  _result = heap->satisfy_failed_allocation(_word_size, _is_tlab);
 
   if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
@@ -56,24 +56,14 @@ static bool is_cause_full(GCCause::Cause cause) {
 }
 
 // Only used for System.gc() calls
-VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+VM_ParallelGCCollect::VM_ParallelGCCollect(uint gc_count,
                                            uint full_gc_count,
                                            GCCause::Cause gc_cause) :
-  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)),
-  _full_gc_succeeded(false)
-{
-}
+  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)) {}
 
-void VM_ParallelGCSystemGC::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::FULL);
-
+void VM_ParallelGCCollect::doit() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (!_full) {
-    // If (and only if) the scavenge fails, this will invoke a full gc.
-    _full_gc_succeeded = heap->invoke_scavenge();
-  } else {
-    _full_gc_succeeded = PSParallelCompact::invoke(false);
-  }
+  heap->try_collect_at_safepoint(_full);
 }
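After the rename, both operations are thin shells: they capture the request and hand all policy to the heap. A minimal standalone sketch of that division of labor (hypothetical code with invented names, not the HotSpot VM_Operation machinery):

#include <cstddef>
#include <cstdio>

struct Heap {
  void* satisfy_failed_allocation(size_t word_size, bool is_tlab) {
    std::printf("gc, then retry %zu words (tlab=%d)\n", word_size, is_tlab);
    return nullptr; // pretend we are out of memory
  }
  void try_collect_at_safepoint(bool full) {
    std::printf("collect (full=%d)\n", full);
  }
};

// Mirrors VM_ParallelCollectForAllocation::doit().
struct CollectForAllocation {
  size_t word_size; bool is_tlab; void* result = nullptr;
  void doit(Heap& heap) { result = heap.satisfy_failed_allocation(word_size, is_tlab); }
};

// Mirrors VM_ParallelGCCollect::doit().
struct Collect {
  bool full;
  void doit(Heap& heap) { heap.try_collect_at_safepoint(full); }
};

int main() {
  Heap heap;
  CollectForAllocation op1{128, false};
  op1.doit(heap);
  Collect op2{true};
  op2.doit(heap);
}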
@@ -29,23 +29,22 @@
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 
-class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
- public:
-  VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
+class VM_ParallelCollectForAllocation : public VM_CollectForAllocation {
+  bool _is_tlab;
+ public:
+  VM_ParallelCollectForAllocation(size_t word_size, bool is_tlab, uint gc_count);
 
   virtual VMOp_Type type() const {
-    return VMOp_ParallelGCFailedAllocation;
+    return VMOp_ParallelCollectForAllocation;
   }
   virtual void doit();
 };
 
-class VM_ParallelGCSystemGC: public VM_GC_Operation {
-  bool _full_gc_succeeded;
+class VM_ParallelGCCollect: public VM_GC_Operation {
  public:
-  VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
-  virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
+  VM_ParallelGCCollect(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
+  virtual VMOp_Type type() const { return VMOp_ParallelGCCollect; }
   virtual void doit();
-  bool full_gc_succeeded() const { return _full_gc_succeeded; }
 };
 
 #endif // SHARE_GC_PARALLEL_PSVMOPERATIONS_HPP
@@ -42,10 +42,10 @@
 //  VM_GC_HeapInspection
 //  VM_PopulateDynamicDumpSharedSpace
 //  VM_SerialGCCollect
-//  VM_ParallelGCSystemGC
+//  VM_ParallelGCCollect
 //  VM_CollectForAllocation
 //   VM_SerialCollectForAllocation
-//   VM_ParallelGCFailedAllocation
+//   VM_ParallelCollectForAllocation
 //  VM_Verify
 //  VM_PopulateDumpSharedSpace
 //
@@ -64,13 +64,13 @@
 //
 //  VM_CollectForAllocation
 //   VM_SerialCollectForAllocation
-//   VM_ParallelGCFailedAllocation
+//   VM_ParallelCollectForAllocation
 //    - this operation is invoked when allocation is failed;
 //      operation performs garbage collection and tries to
 //      allocate afterwards;
 //
 //  VM_SerialGCCollect
-//  VM_ParallelGCSystemGC
+//  VM_ParallelGCCollect
 //    - these operations perform full collection of heaps of
 //      different kind
 //
@@ -52,8 +52,8 @@
   template(GC_HeapInspection)                     \
   template(SerialCollectForAllocation)            \
   template(SerialGCCollect)                       \
-  template(ParallelGCFailedAllocation)            \
-  template(ParallelGCSystemGC)                    \
+  template(ParallelCollectForAllocation)          \
+  template(ParallelGCCollect)                     \
   template(G1CollectForAllocation)                \
   template(G1CollectFull)                         \
   template(G1PauseRemark)                         \
@@ -459,8 +459,6 @@ alias sun.gc.policy.edenSize // 1.5.0 b39
   hotspot.gc.policy.eden_size // 1.5.0 b21
 alias sun.gc.policy.freeSpace // 1.5.0 b39
   hotspot.gc.policy.free_space // 1.5.0 b21
-alias sun.gc.policy.fullFollowsScavenge // 1.5.0 b39
-  hotspot.gc.policy.full_follows_scavenge // 1.5.0 b21
 alias sun.gc.policy.gcTimeLimitExceeded // 1.5.0 b39
   hotspot.gc.policy.gc_time_limit_exceeded // 1.5.0 b21
 alias sun.gc.policy.generations // 1.5.0 b39
@@ -508,8 +506,6 @@ alias sun.gc.policy.promoSize // 1.5.0 b39
   hotspot.gc.policy.promo_size // 1.5.0 b21
 alias sun.gc.policy.promoted // 1.5.0 b39
   hotspot.gc.policy.promoted // 1.5.0 b21
-alias sun.gc.policy.scavengeSkipped // 1.5.0 b39
-  hotspot.gc.policy.scavenge_skipped // 1.5.0 b21
 alias sun.gc.policy.survived // 1.5.0 b39
   hotspot.gc.policy.survived // 1.5.0 b21
 alias sun.gc.policy.survivorOverflowed // 1.5.0 b39
@@ -43,7 +43,7 @@ import jdk.test.lib.jfr.Events;
 public class TestVMOperation {
 
     private static final String EVENT_NAME = EventNames.ExecuteVMOperation;
-    private static final String VM_OPERATION = "ParallelGCSystemGC";
+    private static final String VM_OPERATION = "ParallelGCCollect";
 
     public static void main(String[] args) throws Throwable {
         Recording recording = new Recording();