8331557: Serial: Refactor SerialHeap::do_collection

Reviewed-by: gli, iwalulya
Albert Mingkun Yang 2024-05-17 09:09:02 +00:00
parent 14198f502f
commit f1ce9b0ecc
15 changed files with 208 additions and 439 deletions

View File

@@ -641,22 +641,9 @@ void DefNewGeneration::adjust_desired_tenuring_threshold() {
     age_table()->print_age_table();
   }
 
-void DefNewGeneration::collect(bool   full,
-                               bool   clear_all_soft_refs,
-                               size_t size,
-                               bool   is_tlab) {
-  assert(full || size > 0, "otherwise we don't want to collect");
-
+bool DefNewGeneration::collect(bool clear_all_soft_refs) {
   SerialHeap* heap = SerialHeap::heap();
 
-  // If the next generation is too full to accommodate promotion
-  // from this generation, pass on collection; let the next generation
-  // do it.
-  if (!collection_attempt_is_safe()) {
-    log_trace(gc)(":: Collection attempt not safe ::");
-    heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
-    return;
-  }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 
   _gc_timer->register_gc_start();
   _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
@@ -774,6 +761,8 @@ void DefNewGeneration::collect(bool full,
   _gc_timer->register_gc_end();
 
   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+
+  return !_promotion_failed;
 }
 
 void DefNewGeneration::init_assuming_no_promotion_failure() {

View File

@@ -208,19 +208,18 @@ class DefNewGeneration: public Generation {
   HeapWord* block_start(const void* p) const;
 
   // Allocation support
-  virtual bool should_allocate(size_t word_size, bool is_tlab) {
+  bool should_allocate(size_t word_size, bool is_tlab) {
     assert(UseTLAB || !is_tlab, "Should not allocate tlab");
+    assert(word_size != 0, "precondition");
 
     size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
 
-    const bool non_zero      = word_size > 0;
     const bool overflows     = word_size >= overflow_limit;
     const bool check_too_big = _pretenure_size_threshold_words > 0;
     const bool not_too_big   = word_size < _pretenure_size_threshold_words;
     const bool size_ok       = is_tlab || !check_too_big || not_too_big;
 
     bool result = !overflows &&
-                  non_zero   &&
                   size_ok;
 
     return result;
@@ -253,10 +252,7 @@ class DefNewGeneration: public Generation {
   // at some additional cost.
   bool collection_attempt_is_safe();
 
-  virtual void collect(bool   full,
-                       bool   clear_all_soft_refs,
-                       size_t size,
-                       bool   is_tlab);
+  bool collect(bool clear_all_soft_refs);
 
   HeapWord* expand_and_allocate(size_t size, bool is_tlab);

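The size check kept by this hunk is compact but easy to misread. The standalone sketch below is not JDK source; the constants are stand-ins for HotSpot's BitsPerSize_t, LogHeapWordSize and _pretenure_size_threshold_words. It shows the same logic in isolation: on an LP64 build the overflow limit works out to 2^61 words, and a zero word_size is now rejected by an assert rather than by the removed non_zero flag.

// Standalone sketch (not JDK source) of the DefNewGeneration::should_allocate
// size check after this change. The globals below are stand-ins for the real
// HotSpot constants and the pretenure threshold.
#include <cassert>
#include <cstddef>
#include <cstdio>

const size_t BitsPerSize_t   = sizeof(size_t) * 8;  // 64 on LP64
const size_t LogHeapWordSize = 3;                    // 8-byte heap words
const size_t pretenure_size_threshold_words = 0;     // 0 means "no pretenure limit"

bool should_allocate_sketch(size_t word_size, bool is_tlab) {
  // word_size == 0 is excluded up front by an assert, not a runtime flag.
  assert(word_size != 0 && "precondition");

  size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);  // 2^61 words on LP64

  const bool overflows     = word_size >= overflow_limit;
  const bool check_too_big = pretenure_size_threshold_words > 0;
  const bool not_too_big   = word_size < pretenure_size_threshold_words;
  const bool size_ok       = is_tlab || !check_too_big || not_too_big;

  return !overflows && size_ok;
}

int main() {
  printf("%d\n", should_allocate_sketch(128, /*is_tlab=*/false));  // prints 1
  return 0;
}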
View File

@@ -70,14 +70,3 @@ void Generation::print_on(outputStream* st) const {
              p2i(_virtual_space.high()),
              p2i(_virtual_space.high_boundary()));
 }
-
-void Generation::print_summary_info_on(outputStream* st) {
-  StatRecord* sr = stat_record();
-  double time = sr->accumulated_time.seconds();
-  st->print_cr("Accumulated %s generation GC time %3.7f secs, "
-               "%u GC's, avg GC time %3.7f",
-               SerialHeap::heap()->is_young_gen(this) ? "young" : "old" ,
-               time,
-               sr->invocations,
-               sr->invocations > 0 ? time / sr->invocations : 0.0);
-}

View File

@@ -107,20 +107,6 @@ class Generation: public CHeapObj<mtGC> {
     return _reserved.contains(p);
   }
 
-  // Returns "true" iff this generation should be used to allocate an
-  // object of the given size.  Young generations might
-  // wish to exclude very large objects, for example, since, if allocated
-  // often, they would greatly increase the frequency of young-gen
-  // collection.
-  virtual bool should_allocate(size_t word_size, bool is_tlab) {
-    bool result = false;
-    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
-    if (!is_tlab || supports_tlab_allocation()) {
-      result = (word_size > 0) && (word_size < overflow_limit);
-    }
-    return result;
-  }
-
   // Allocate and returns a block of the requested size, or returns "null".
   // Assumes the caller has done any necessary locking.
   virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;
@@ -131,31 +117,6 @@ class Generation: public CHeapObj<mtGC> {
   // Thread-local allocation buffers
   virtual bool supports_tlab_allocation() const { return false; }
 
-  // Returns "true" iff collect() should subsequently be called on this
-  // this generation. See comment below.
-  // This is a generic implementation which can be overridden.
-  //
-  // Note: in the current (1.4) implementation, when serialHeap's
-  // incremental_collection_will_fail flag is set, all allocations are
-  // slow path (the only fast-path place to allocate is DefNew, which
-  // will be full if the flag is set).
-  // Thus, older generations which collect younger generations should
-  // test this flag and collect if it is set.
-  virtual bool should_collect(bool   full,
-                              size_t word_size,
-                              bool   is_tlab) {
-    return (full || should_allocate(word_size, is_tlab));
-  }
-
-  // Perform a garbage collection.
-  // If full is true attempt a full garbage collection of this generation.
-  // Otherwise, attempting to (at least) free enough space to support an
-  // allocation of the given "word_size".
-  virtual void collect(bool   full,
-                       bool   clear_all_soft_refs,
-                       size_t word_size,
-                       bool   is_tlab) = 0;
-
   // Perform a heap collection, attempting to create (at least) enough
   // space to support an allocation of the given "word_size".  If
   // successful, perform the allocation and return the resulting
@@ -172,20 +133,7 @@ class Generation: public CHeapObj<mtGC> {
   virtual void verify() = 0;
 
-  struct StatRecord {
-    int invocations;
-    elapsedTimer accumulated_time;
-    StatRecord() :
-      invocations(0),
-      accumulated_time(elapsedTimer()) {}
-  };
-private:
-  StatRecord _stat_record;
 public:
-  StatRecord* stat_record() { return &_stat_record; }
-
-  virtual void print_summary_info_on(outputStream* st);
-
   // Performance Counter support
   virtual void update_counters() = 0;
   virtual CollectorCounters* counters() { return _gc_counters; }

View File

@@ -314,7 +314,7 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
     // First allocation attempt is lock-free.
-    Generation *young = _young_gen;
+    DefNewGeneration *young = _young_gen;
     if (young->should_allocate(size, is_tlab)) {
       result = young->par_allocate(size, is_tlab);
       if (result != nullptr) {
@@ -379,7 +379,7 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
       gc_count_before = total_collections();
     }
 
-    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
+    VM_SerialCollectForAllocation op(size, is_tlab, gc_count_before);
     VMThread::execute(&op);
     if (op.prologue_succeeded()) {
       result = op.result();
@@ -432,200 +432,61 @@ bool SerialHeap::must_clear_all_soft_refs() {
          _gc_cause == GCCause::_wb_full_gc;
 }
 
-void SerialHeap::collect_generation(Generation* gen, bool full, size_t size,
-                                    bool is_tlab, bool run_verification, bool clear_soft_refs) {
-  FormatBuffer<> title("Collect gen: %s", gen->short_name());
-  GCTraceTime(Trace, gc, phases) t1(title);
-  TraceCollectorStats tcs(gen->counters());
-  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");
-
-  gen->stat_record()->invocations++;
-  gen->stat_record()->accumulated_time.start();
-
-  // Must be done anew before each collection because
-  // a previous collection will do mangling and will
-  // change top of some spaces.
-  record_gen_tops_before_GC();
-
-  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
-
-  if (run_verification && VerifyBeforeGC) {
-    Universe::verify("Before GC");
-  }
-
-  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
-
-  // Do collection work
-  {
-    save_marks();   // save marks for all gens
-
-    gen->collect(full, clear_soft_refs, size, is_tlab);
-  }
-
-  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
-
-  gen->stat_record()->accumulated_time.stop();
-
-  update_gc_stats(gen, full);
-
-  if (run_verification && VerifyAfterGC) {
-    Universe::verify("After GC");
-  }
-}
-
-void SerialHeap::do_collection(bool full,
-                               bool clear_all_soft_refs,
-                               size_t size,
-                               bool is_tlab,
-                               GenerationType max_generation) {
-  ResourceMark rm;
-  DEBUG_ONLY(Thread* my_thread = Thread::current();)
-
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(my_thread->is_VM_thread(), "only VM thread");
-  assert(Heap_lock->is_locked(),
-         "the requesting thread should have the Heap_lock");
-  guarantee(!is_stw_gc_active(), "collection is not reentrant");
-
-  if (GCLocker::check_active_before_gc()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-  }
-
-  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
-                          soft_ref_policy()->should_clear_all_soft_refs();
-
-  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
-
-  IsSTWGCActiveMark active_gc_mark;
-
-  bool complete = full && (max_generation == OldGen);
-  bool old_collects_young = complete;
-  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
-
-  const PreGenGCValues pre_gc_values = get_pre_gc_values();
-
-  bool run_verification = total_collections() >= VerifyGCStartAt;
-  bool prepared_for_verification = false;
-  bool do_full_collection = false;
-
-  if (do_young_collection) {
-    GCIdMark gc_id_mark;
-    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
-    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);
-
-    print_heap_before_gc();
-
-    if (run_verification && VerifyBeforeGC) {
-      prepare_for_verify();
-      prepared_for_verification = true;
-    }
-
-    gc_prologue(complete);
-    increment_total_collections(complete);
-
-    collect_generation(_young_gen,
-                       full,
-                       size,
-                       is_tlab,
-                       run_verification,
-                       do_clear_all_soft_refs);
-
-    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
-        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
-      // Allocation request was met by young GC.
-      size = 0;
-    }
-
-    // Ask if young collection is enough. If so, do the final steps for young collection,
-    // and fallthrough to the end.
-    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
-    if (!do_full_collection) {
-      // Adjust generation sizes.
-      _young_gen->compute_new_size();
-
-      print_heap_change(pre_gc_values);
-
-      // Track memory usage and detect low memory after GC finishes
-      MemoryService::track_memory_usage();
-
-      gc_epilogue(complete);
-    }
-
-    print_heap_after_gc();
-
-  } else {
-    // No young collection, ask if we need to perform Full collection.
-    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
-  }
-
-  if (do_full_collection) {
-    GCIdMark gc_id_mark;
-    GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
-    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);
-
-    print_heap_before_gc();
-
-    if (!prepared_for_verification && run_verification && VerifyBeforeGC) {
-      prepare_for_verify();
-    }
-
-    if (!do_young_collection) {
-      gc_prologue(complete);
-      increment_total_collections(complete);
-    }
-
-    // Accounting quirk: total full collections would be incremented when "complete"
-    // is set, by calling increment_total_collections above. However, we also need to
-    // account Full collections that had "complete" unset.
-    if (!complete) {
-      increment_total_full_collections();
-    }
-
-    CodeCache::on_gc_marking_cycle_start();
-
-    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
-                              false /* unregister_nmethods_during_purge */,
-                              false /* lock_nmethod_free_separately */);
-
-    collect_generation(_old_gen,
-                       full,
-                       size,
-                       is_tlab,
-                       run_verification,
-                       do_clear_all_soft_refs);
-
-    CodeCache::on_gc_marking_cycle_finish();
-    CodeCache::arm_all_nmethods();
-
-    // Adjust generation sizes.
-    _old_gen->compute_new_size();
-    _young_gen->compute_new_size();
-
-    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
-    DEBUG_ONLY(MetaspaceUtils::verify();)
-
-    // Need to clear claim bits for the next mark.
-    ClassLoaderDataGraph::clear_claimed_marks();
-
-    // Resize the metaspace capacity after full collections
-    MetaspaceGC::compute_new_size();
-
-    print_heap_change(pre_gc_values);
-
-    // Track memory usage and detect low memory after GC finishes
-    MemoryService::track_memory_usage();
-
-    // Need to tell the epilogue code we are done with Full GC, regardless what was
-    // the initial value for "complete" flag.
-    gc_epilogue(true);
-
-    print_heap_after_gc();
-  }
-}
-
-bool SerialHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
-                                           SerialHeap::GenerationType max_gen) const {
-  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
-}
+bool SerialHeap::is_young_gc_safe() const {
+  if (!_young_gen->to()->is_empty()) {
+    return false;
+  }
+  return _old_gen->promotion_attempt_is_safe(_young_gen->used());
+}
+
+bool SerialHeap::do_young_collection(bool clear_soft_refs) {
+  if (!is_young_gc_safe()) {
+    return false;
+  }
+
+  IsSTWGCActiveMark gc_active_mark;
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
+  GCIdMark gc_id_mark;
+  GCTraceCPUTime tcpu(_young_gen->gc_tracer());
+  GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);
+  TraceCollectorStats tcs(_young_gen->counters());
+  TraceMemoryManagerStats tmms(_young_gen->gc_manager(), gc_cause(), "end of minor GC");
+
+  print_heap_before_gc();
+  const PreGenGCValues pre_gc_values = get_pre_gc_values();
+
+  record_gen_tops_before_GC();
+
+  increment_total_collections(false);
+  const bool should_verify = total_collections() >= VerifyGCStartAt;
+  if (should_verify && VerifyBeforeGC) {
+    prepare_for_verify();
+    Universe::verify("Before GC");
+  }
+
+  gc_prologue(false);
+  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
+
+  save_marks();
+
+  bool result = _young_gen->collect(clear_soft_refs);
+
+  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
+
+  update_gc_stats(_young_gen, false);
+
+  if (should_verify && VerifyAfterGC) {
+    Universe::verify("After GC");
+  }
+
+  _young_gen->compute_new_size();
+
+  print_heap_change(pre_gc_values);
+
+  // Track memory usage and detect low memory after GC finishes
+  MemoryService::track_memory_usage();
+
+  gc_epilogue(false);
+
+  print_heap_after_gc();
+
+  return result;
+}
 
 void SerialHeap::register_nmethod(nmethod* nm) {
@@ -649,10 +510,11 @@ void SerialHeap::prune_unlinked_nmethods() {
 }
 
 HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
-  GCCauseSetter x(this, GCCause::_allocation_failure);
+  assert(size != 0, "precondition");
+
   HeapWord* result = nullptr;
-  assert(size != 0, "Precondition violated");
+
+  GCLocker::check_active_before_gc();
+
   if (GCLocker::is_active_and_needs_gc()) {
     // GC locker is active; instead of a collection we will attempt
     // to expand the heap, if there's room for expansion.
@@ -660,30 +522,14 @@ HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
       result = expand_heap_and_allocate(size, is_tlab);
     }
     return result;   // Could be null if we are out of space.
-  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
-    // Do an incremental collection.
-    do_collection(false,                     // full
-                  false,                     // clear_all_soft_refs
-                  size,                      // size
-                  is_tlab,                   // is_tlab
-                  SerialHeap::OldGen);       // max_generation
-  } else {
-    log_trace(gc)(" :: Trying full because partial may fail :: ");
-    // Try a full collection; see delta for bug id 6266275
-    // for the original code and why this has been simplified
-    // with from-space allocation criteria modified and
-    // such allocation moved out of the safepoint path.
-    do_collection(true,                      // full
-                  false,                     // clear_all_soft_refs
-                  size,                      // size
-                  is_tlab,                   // is_tlab
-                  SerialHeap::OldGen);       // max_generation
   }
 
-  result = attempt_allocation(size, is_tlab, false /*first_only*/);
+  // If young-gen can handle this allocation, attempt young-gc firstly.
+  bool should_run_young_gc = _young_gen->should_allocate(size, is_tlab);
+  collect_at_safepoint(!should_run_young_gc);
 
+  result = attempt_allocation(size, is_tlab, false /*first_only*/);
   if (result != nullptr) {
-    assert(is_in_reserved(result), "result not in heap");
     return result;
   }
@@ -700,17 +546,17 @@ HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
   // attempt fails, an OOM exception will be thrown.
   {
     UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
-    do_collection(true,                      // full
-                  true,                      // clear_all_soft_refs
-                  size,                      // size
-                  is_tlab,                   // is_tlab
-                  SerialHeap::OldGen);       // max_generation
+    const bool clear_all_soft_refs = true;
+    do_full_collection_no_gc_locker(clear_all_soft_refs);
   }
 
   result = attempt_allocation(size, is_tlab, false /* first_only */);
   if (result != nullptr) {
-    assert(is_in_reserved(result), "result not in heap");
+    return result;
+  }
+
+  // The previous full-gc can shrink the heap, so re-expand it.
+  result = expand_heap_and_allocate(size, is_tlab);
+  if (result != nullptr) {
     return result;
   }
@@ -786,6 +632,28 @@ void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
   guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
+void SerialHeap::try_collect_at_safepoint(bool full) {
+  assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+  if (GCLocker::check_active_before_gc()) {
+    return;
+  }
+  collect_at_safepoint(full);
+}
+
+void SerialHeap::collect_at_safepoint(bool full) {
+  assert(!GCLocker::is_active(), "precondition");
+
+  bool clear_soft_refs = must_clear_all_soft_refs();
+
+  if (!full) {
+    bool success = do_young_collection(clear_soft_refs);
+    if (success) {
+      return;
+    }
+    // Upgrade to Full-GC if young-gc fails
+  }
+  do_full_collection_no_gc_locker(clear_soft_refs);
+}
+
 // public collection interfaces
 void SerialHeap::collect(GCCause::Cause cause) {
   // The caller doesn't have the Heap_lock
@@ -809,13 +677,11 @@ void SerialHeap::collect(GCCause::Cause cause) {
                                   || (cause == GCCause::_gc_locker)
                 DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));
 
-  const GenerationType max_generation = should_run_young_gc
-                                      ? YoungGen
-                                      : OldGen;
-
   while (true) {
-    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
-                         cause, max_generation);
+    VM_SerialGCCollect op(!should_run_young_gc,
+                          gc_count_before,
+                          full_gc_count_before,
+                          cause);
     VMThread::execute(&op);
 
     if (!GCCause::is_explicit_full_gc(cause)) {
@@ -838,27 +704,83 @@ void SerialHeap::collect(GCCause::Cause cause) {
 }
 
 void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_full_collection(clear_all_soft_refs, OldGen);
+  if (GCLocker::check_active_before_gc()) {
+    return;
+  }
+  do_full_collection_no_gc_locker(clear_all_soft_refs);
 }
 
-void SerialHeap::do_full_collection(bool clear_all_soft_refs,
-                                    GenerationType last_generation) {
-  do_collection(true,                  // full
-                clear_all_soft_refs,   // clear_all_soft_refs
-                0,                     // size
-                false,                 // is_tlab
-                last_generation);      // last_generation
-  // Hack XXX FIX ME !!!
-  // A scavenge may not have been attempted, or may have
-  // been attempted and failed, because the old gen was too full
-  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
-    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
-    // This time allow the old gen to be collected as well
-    do_collection(true,                 // full
-                  clear_all_soft_refs,  // clear_all_soft_refs
-                  0,                    // size
-                  false,                // is_tlab
-                  OldGen);              // last_generation
-  }
+void SerialHeap::do_full_collection_no_gc_locker(bool clear_all_soft_refs) {
+  IsSTWGCActiveMark gc_active_mark;
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
+  GCIdMark gc_id_mark;
+  GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
+  GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);
+  TraceCollectorStats tcs(_old_gen->counters());
+  TraceMemoryManagerStats tmms(_old_gen->gc_manager(), gc_cause(), "end of major GC");
+
+  const PreGenGCValues pre_gc_values = get_pre_gc_values();
+
+  print_heap_before_gc();
+
+  increment_total_collections(true);
+  const bool should_verify = total_collections() >= VerifyGCStartAt;
+  if (should_verify && VerifyBeforeGC) {
+    prepare_for_verify();
+    Universe::verify("Before GC");
+  }
+
+  gc_prologue(true);
+  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
+  CodeCache::on_gc_marking_cycle_start();
+
+  ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
+                            false /* unregister_nmethods_during_purge */,
+                            false /* lock_nmethod_free_separately */);
+
+  STWGCTimer* gc_timer = SerialFullGC::gc_timer();
+  gc_timer->register_gc_start();
+
+  SerialOldTracer* gc_tracer = SerialFullGC::gc_tracer();
+  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
+
+  pre_full_gc_dump(gc_timer);
+
+  SerialFullGC::invoke_at_safepoint(clear_all_soft_refs);
+
+  post_full_gc_dump(gc_timer);
+
+  gc_timer->register_gc_end();
+  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
+  CodeCache::on_gc_marking_cycle_finish();
+  CodeCache::arm_all_nmethods();
+  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
+
+  // Adjust generation sizes.
+  _old_gen->compute_new_size();
+  _young_gen->compute_new_size();
+
+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge(/*at_safepoint*/true);
+  DEBUG_ONLY(MetaspaceUtils::verify();)
+
+  // Need to clear claim bits for the next mark.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
+  // Resize the metaspace capacity after full collections
+  MetaspaceGC::compute_new_size();
+
+  print_heap_change(pre_gc_values);
+
+  // Track memory usage and detect low memory after GC finishes
+  MemoryService::track_memory_usage();
+
+  // Need to tell the epilogue code we are done with Full GC, regardless what was
+  // the initial value for "complete" flag.
+  gc_epilogue(true);
+
+  print_heap_after_gc();
+
+  if (should_verify && VerifyAfterGC) {
+    Universe::verify("After GC");
+  }
 }
@@ -982,11 +904,7 @@ bool SerialHeap::print_location(outputStream* st, void* addr) const {
 }
 
 void SerialHeap::print_tracing_info() const {
-  if (log_is_enabled(Debug, gc, heap, exit)) {
-    LogStreamHandle(Debug, gc, heap, exit) lsh;
-    _young_gen->print_summary_info_on(&lsh);
-    _old_gen->print_summary_info_on(&lsh);
-  }
+  // Does nothing
 }
 
 void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {

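The control flow that replaces do_collection above is small enough to restate. The sketch below is illustrative only and is not the committed source: try_young_collection and run_full_collection are hypothetical stand-ins for SerialHeap::do_young_collection and do_full_collection_no_gc_locker. It shows the shape of the new collect_at_safepoint: a non-full request first attempts a young collection and is upgraded to a full collection if that is skipped as unsafe or does not succeed.

// Rough control-flow sketch (not JDK source) of the post-refactor structure.
#include <cstdio>

static bool try_young_collection(bool clear_soft_refs) {
  // Stand-in for do_young_collection(): returns false when the young GC is
  // skipped (unsafe promotion) or when promotion fails during the GC.
  (void)clear_soft_refs;
  return false;
}

static void run_full_collection(bool clear_soft_refs) {
  // Stand-in for do_full_collection_no_gc_locker().
  printf("full collection, clear_soft_refs=%d\n", clear_soft_refs);
}

static void collect_at_safepoint_sketch(bool full, bool clear_soft_refs) {
  if (!full) {
    if (try_young_collection(clear_soft_refs)) {
      return;  // young GC was sufficient
    }
    // Upgrade to a full GC if the young GC could not run or did not succeed.
  }
  run_full_collection(clear_soft_refs);
}

int main() {
  collect_at_safepoint_sketch(/*full=*/false, /*clear_soft_refs=*/false);
  return 0;
}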
View File

@@ -67,8 +67,6 @@ class SerialHeap : public CollectedHeap {
   friend class DefNewGeneration;
   friend class TenuredGeneration;
   friend class SerialFullGC;
-  friend class VM_GenCollectForAllocation;
-  friend class VM_GenCollectFull;
   friend class VM_GC_HeapInspection;
   friend class VM_HeapDumper;
   friend class HeapInspection;
@@ -87,7 +85,7 @@ private:
   TenuredGeneration* _old_gen;
   HeapWord* _young_gen_saved_top;
   HeapWord* _old_gen_saved_top;
-private:
+
   // The singleton CardTable Remembered Set.
   CardTableRS* _rem_set;
@@ -98,16 +96,13 @@ private:
   // condition that caused that incremental collection to fail.
   bool _incremental_collection_failed;
 
-  // Collects the given generation.
-  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
-                          bool run_verification, bool clear_soft_refs);
+  bool do_young_collection(bool clear_soft_refs);
 
   // Reserve aligned space for the heap as needed by the contained generations.
   ReservedHeapSpace allocate(size_t alignment);
 
   PreGenGCValues get_pre_gc_values() const;
 
-private:
   GCMemoryManager* _young_manager;
   GCMemoryManager* _old_manager;
@@ -116,29 +111,17 @@ private:
                             bool is_tlab,
                             bool first_only);
 
-  // Helper function for two callbacks below.
-  // Considers collection of the first max_level+1 generations.
-  void do_collection(bool full,
-                     bool clear_all_soft_refs,
-                     size_t size,
-                     bool is_tlab,
-                     GenerationType max_generation);
-
-  // Callback from VM_GenCollectForAllocation operation.
-  // This function does everything necessary/possible to satisfy an
-  // allocation request that failed in the youngest generation that should
-  // have handled it (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
-
-  // Callback from VM_GenCollectFull operation.
-  // Perform a full collection of the first max_level+1 generations.
   void do_full_collection(bool clear_all_soft_refs) override;
-  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);
+  void do_full_collection_no_gc_locker(bool clear_all_soft_refs);
+
+  void collect_at_safepoint(bool full);
 
   // Does the "cause" of GC indicate that
   // we absolutely __must__ clear soft refs?
   bool must_clear_all_soft_refs();
+
+  bool is_young_gc_safe() const;
 
 public:
   // Returns JNI_OK on success
   jint initialize() override;
@@ -159,6 +142,15 @@ public:
   HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
 
+  // Callback from VM_SerialCollectForAllocation operation.
+  // This function does everything necessary/possible to satisfy an
+  // allocation request that failed in the youngest generation that should
+  // have handled it (including collection, expansion, etc.)
+  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
+
+  // Callback from VM_SerialGCCollect.
+  void try_collect_at_safepoint(bool full);
+
   // Perform a full collection of the heap; intended for use in implementing
   // "System.gc". This implies as full a collection as the CollectedHeap
   // supports. Caller does not hold the Heap_lock on entry.
@@ -305,10 +297,6 @@ private:
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
 
-  // Return true if we need to perform full collection.
-  bool should_do_full_collection(size_t size, bool full,
-                                 bool is_tlab, GenerationType max_gen) const;
-
 private:
   MemoryPool* _eden_pool;
   MemoryPool* _survivor_pool;

View File

@@ -26,9 +26,7 @@
 #include "gc/serial/serialVMOperations.hpp"
 #include "gc/shared/gcLocker.hpp"
 
-void VM_GenCollectForAllocation::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::MINOR);
+void VM_SerialCollectForAllocation::doit() {
   SerialHeap* gch = SerialHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
@@ -39,10 +37,8 @@ void VM_GenCollectForAllocation::doit() {
   }
 }
 
-void VM_GenCollectFull::doit() {
-  SvcGCMarker sgcm(SvcGCMarker::FULL);
+void VM_SerialGCCollect::doit() {
   SerialHeap* gch = SerialHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
-  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
+  gch->try_collect_at_safepoint(_full);
 }

View File

@@ -28,37 +28,32 @@
 #include "gc/serial/serialHeap.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 
-class VM_GenCollectForAllocation : public VM_CollectForAllocation {
+class VM_SerialCollectForAllocation : public VM_CollectForAllocation {
  private:
   bool _tlab;                    // alloc is of a tlab.
  public:
-  VM_GenCollectForAllocation(size_t word_size,
-                             bool tlab,
-                             uint gc_count_before)
+  VM_SerialCollectForAllocation(size_t word_size,
+                                bool tlab,
+                                uint gc_count_before)
     : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
       _tlab(tlab) {
     assert(word_size != 0, "An allocation should always be requested with this operation.");
   }
-  ~VM_GenCollectForAllocation()  {}
-  virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
+  virtual VMOp_Type type() const { return VMOp_SerialCollectForAllocation; }
   virtual void doit();
 };
 
 // VM operation to invoke a collection of the heap as a
 // SerialHeap heap.
-class VM_GenCollectFull: public VM_GC_Operation {
- private:
-  SerialHeap::GenerationType _max_generation;
+class VM_SerialGCCollect: public VM_GC_Operation {
  public:
-  VM_GenCollectFull(uint gc_count_before,
-                    uint full_gc_count_before,
-                    GCCause::Cause gc_cause,
-                    SerialHeap::GenerationType max_generation)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
-                      max_generation != SerialHeap::YoungGen /* full */),
-      _max_generation(max_generation) { }
-
-  ~VM_GenCollectFull() {}
-  virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
+  VM_SerialGCCollect(bool full,
+                     uint gc_count_before,
+                     uint full_gc_count_before,
+                     GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, full) {}
+
+  virtual VMOp_Type type() const { return VMOp_SerialGCCollect; }
   virtual void doit();
 };

View File

@@ -453,29 +453,6 @@ oop TenuredGeneration::promote(oop obj, size_t obj_size) {
   return new_obj;
 }
 
-void TenuredGeneration::collect(bool   full,
-                                bool   clear_all_soft_refs,
-                                size_t size,
-                                bool   is_tlab) {
-  SerialHeap* gch = SerialHeap::heap();
-
-  STWGCTimer* gc_timer = SerialFullGC::gc_timer();
-  gc_timer->register_gc_start();
-
-  SerialOldTracer* gc_tracer = SerialFullGC::gc_tracer();
-  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
-  gch->pre_full_gc_dump(gc_timer);
-
-  SerialFullGC::invoke_at_safepoint(clear_all_soft_refs);
-
-  gch->post_full_gc_dump(gc_timer);
-
-  gc_timer->register_gc_end();
-
-  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-}
-
 HeapWord*
 TenuredGeneration::expand_and_allocate(size_t word_size, bool is_tlab) {
   assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");

View File

@@ -135,11 +135,6 @@
   virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
   virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
-  virtual void collect(bool full,
-                       bool clear_all_soft_refs,
-                       size_t size,
-                       bool is_tlab);
-
   HeapWord* expand_and_allocate(size_t size, bool is_tlab);
 
   void gc_prologue();
@@ -149,6 +144,15 @@
                              size_t word_size,
                              bool is_tlab);
 
+  bool should_allocate(size_t word_size, bool is_tlab) {
+    bool result = false;
+    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
+    if (!is_tlab || supports_tlab_allocation()) {
+      result = (word_size > 0) && (word_size < overflow_limit);
+    }
+    return result;
+  }
+
   // Performance Counter support
   void update_counters();

View File

@@ -36,10 +36,6 @@
                  static_field)                                                \
   nonstatic_field(Generation, _reserved,      MemRegion)                      \
   nonstatic_field(Generation, _virtual_space, VirtualSpace)                   \
-  nonstatic_field(Generation, _stat_record,   Generation::StatRecord)         \
-                                                                               \
-  nonstatic_field(Generation::StatRecord, invocations,      int)              \
-  nonstatic_field(Generation::StatRecord, accumulated_time, elapsedTimer)     \
                                                                                \
   nonstatic_field(TenuredGeneration, _rs,  CardTableRS*)                      \
   nonstatic_field(TenuredGeneration, _bts, SerialBlockOffsetTable*)           \
@@ -67,7 +63,6 @@
                  declare_toplevel_type,                                       \
                  declare_integer_type)                                        \
   declare_toplevel_type(Generation)                                           \
-  declare_toplevel_type(Generation::StatRecord)                               \
   declare_type(SerialHeap, CollectedHeap)                                     \
   declare_type(TenuredGeneration, Generation)                                 \
                                                                                \

View File

@@ -41,10 +41,10 @@
 //      VM_GC_Operation
 //          VM_GC_HeapInspection
 //          VM_PopulateDynamicDumpSharedSpace
-//          VM_GenCollectFull
+//          VM_SerialGCCollect
 //          VM_ParallelGCSystemGC
 //      VM_CollectForAllocation
-//          VM_GenCollectForAllocation
+//          VM_SerialCollectForAllocation
 //          VM_ParallelGCFailedAllocation
 //  VM_Verify
 //  VM_PopulateDumpSharedSpace
@@ -63,13 +63,13 @@
 //    is specified; and also the attach "inspectheap" operation
 //
 //  VM_CollectForAllocation
-//  VM_GenCollectForAllocation
+//  VM_SerialCollectForAllocation
 //  VM_ParallelGCFailedAllocation
 //   - this operation is invoked when allocation is failed;
 //     operation performs garbage collection and tries to
 //     allocate afterwards;
 //
-//  VM_GenCollectFull
+//  VM_SerialGCCollect
 //  VM_ParallelGCSystemGC
 //   - these operations perform full collection of heaps of
 //     different kind

View File

@@ -50,8 +50,8 @@
   template(CollectForMetadataAllocation)          \
   template(CollectForCodeCacheAllocation)         \
   template(GC_HeapInspection)                     \
-  template(GenCollectFull)                        \
-  template(GenCollectForAllocation)               \
+  template(SerialCollectForAllocation)            \
+  template(SerialGCCollect)                       \
   template(ParallelGCFailedAllocation)            \
   template(ParallelGCSystemGC)                    \
   template(G1CollectForAllocation)                \

View File

@@ -50,9 +50,6 @@ public abstract class Generation extends VMObject {
   private static long reservedFieldOffset;
   private static long virtualSpaceFieldOffset;
   protected static final int  K = 1024;
-  // Fields for class StatRecord
-  private static Field statRecordField;
-  private static CIntegerField invocationField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -67,20 +64,12 @@ public abstract class Generation extends VMObject {
     reservedFieldOffset     = type.getField("_reserved").getOffset();
     virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
-    // StatRecord
-    statRecordField = type.getField("_stat_record");
-    type = db.lookupType("Generation::StatRecord");
-    invocationField = type.getCIntegerField("invocations");
   }
 
   public Generation(Address addr) {
     super(addr);
   }
 
-  public int invocations() {
-    return getStatRecord().getInvocations();
-  }
-
   /** The maximum number of object bytes the generation can currently
       hold. */
   public abstract long capacity();
@@ -123,19 +112,4 @@ public abstract class Generation extends VMObject {
   public void print() { printOn(System.out); }
   public abstract void printOn(PrintStream tty);
-
-  public static class StatRecord extends VMObject {
-    public StatRecord(Address addr) {
-      super(addr);
-    }
-
-    public int getInvocations() {
-      return (int) invocationField.getValue(addr);
-    }
-  }
-
-  private StatRecord getStatRecord() {
-    return VMObjectFactory.newObject(StatRecord.class, addr.addOffsetTo(statRecordField.getOffset()));
-  }
 }

View File

@@ -95,11 +95,11 @@ public class SerialHeap extends CollectedHeap {
   public void printOn(PrintStream tty) {
     tty.println("SerialHeap:");
 
-    tty.println("Young Generation - Invocations: " + youngGen().invocations());
+    tty.println("Young Generation: ");
     youngGen().printOn(tty);
     tty.println();
 
-    tty.println("Old Generation - Invocations: " + oldGen().invocations());
+    tty.println("Old Generation: ");
     oldGen().printOn(tty);
     tty.println();
   }