8270009: Factor out and shuffle methods in G1CollectedHeap::do_collection_pause_at_safepoint_helper

Reviewed-by: kbarrett, ayang
Author: Thomas Schatzl  2021-07-13 11:27:41 +00:00
parent 6b123b0591
commit 375fc2a2b2
3 changed files with 134 additions and 105 deletions
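Taken together, the hunks below split the monolithic pause body into small named helpers and move per-phase setup (TLAB retirement, dirty card log concatenation, mutator TLAB re-initialization) into pre_evacuate_collection_set()/post_evacuate_collection_set(). A minimal, runnable miniature of the resulting call shape, with stub helpers only, not JDK code:

#include <cstdio>

// Stubs mirroring the helpers factored out by this commit; bodies elided.
static bool determine_start_concurrent_mark_gc() { return false; }
static void set_young_collection_default_active_worker_threads() {}
static void pre_evacuate_collection_set()  { std::puts("retire TLABs, flush card logs"); }
static void evacuate_collection_set()      { std::puts("evacuate"); }
static void post_evacuate_collection_set() { std::puts("re-init mutator TLABs, epilogue"); }

static void do_collection_pause_at_safepoint_helper() {
  bool start_concurrent_mark = determine_start_concurrent_mark_gc();
  set_young_collection_default_active_worker_threads();
  pre_evacuate_collection_set();
  evacuate_collection_set();
  post_evacuate_collection_set();
  std::printf("signal concurrent mark: %d\n", start_concurrent_mark);
}

int main() { do_collection_pause_at_safepoint_helper(); }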

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -2570,24 +2570,6 @@ void G1CollectedHeap::gc_prologue(bool full) {
   if (full || collector_state()->in_concurrent_start_gc()) {
     increment_old_marking_cycles_started();
   }
-
-  // Fill TLAB's and such
-  {
-    Ticks start = Ticks::now();
-    ensure_parsability(true);
-    Tickspan dt = Ticks::now() - start;
-    phase_times()->record_prepare_tlab_time_ms(dt.seconds() * MILLIUNITS);
-  }
-
-  if (!full) {
-    // Flush dirty card queues to qset, so later phases don't need to account
-    // for partially filled per-thread queues and such. Not needed for full
-    // collections, which ignore those logs.
-    Ticks start = Ticks::now();
-    G1BarrierSet::dirty_card_queue_set().concatenate_logs();
-    Tickspan dt = Ticks::now() - start;
-    phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
-  }
 }
 
 void G1CollectedHeap::gc_epilogue(bool full) {
@@ -2601,10 +2583,6 @@ void G1CollectedHeap::gc_epilogue(bool full) {
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
 #endif
 
-  double start = os::elapsedTime();
-  resize_all_tlabs();
-  phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
-
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::heap()->update_capacity_and_used_at_gc();
@@ -2798,6 +2776,9 @@ void G1CollectedHeap::start_new_collection_set() {
 }
 
 void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo* evacuation_info, double target_pause_time_ms) {
+  // Forget the current allocation region (we might even choose it to be part
+  // of the collection set!) before finalizing the collection set.
+  _allocator->release_mutator_alloc_regions();
   _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
   evacuation_info->set_collectionset_regions(collection_set()->region_length() +
@@ -2999,16 +2980,7 @@ public:
   }
 };
 
-void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
-  GCIdMark gc_id_mark;
-  SvcGCMarker sgcm(SvcGCMarker::MINOR);
-  ResourceMark rm;
-
-  policy()->note_gc_start();
-
-  wait_for_root_region_scanning();
-
+bool G1CollectedHeap::determine_start_concurrent_mark_gc(){
   // We should not be doing concurrent start unless the concurrent mark thread is running
   if (!_cm_thread->should_terminate()) {
     // This call will decide whether this pause is a concurrent start
@@ -3023,24 +2995,66 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
   // We also do not allow mixed GCs during marking.
   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
 
+  return collector_state()->in_concurrent_start_gc();
+}
+
+void G1CollectedHeap::set_young_collection_default_active_worker_threads(){
+  uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
+                                                          workers()->active_workers(),
+                                                          Threads::number_of_non_daemon_threads());
+  active_workers = workers()->update_active_workers(active_workers);
+  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
+}
+
+void G1CollectedHeap::prepare_tlabs_for_mutator() {
+  Ticks start = Ticks::now();
+
+  _survivor_evac_stats.adjust_desired_plab_sz();
+  _old_evac_stats.adjust_desired_plab_sz();
+
+  allocate_dummy_regions();
+
+  _allocator->init_mutator_alloc_regions();
+
+  resize_all_tlabs();
+
+  phase_times()->record_resize_tlab_time_ms((Ticks::now() - start).seconds() * 1000.0);
+}
+
+void G1CollectedHeap::retire_tlabs() {
+  ensure_parsability(true);
+}
+
+void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
+  ResourceMark rm;
+
+  IsGCActiveMark active_gc_mark;
+  GCIdMark gc_id_mark;
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
+
+  GCTraceCPUTime tcpu;
+
   // Record whether this pause may need to trigger a concurrent operation. Later,
   // when we signal the G1ConcurrentMarkThread, the collector state has already
   // been reset for the next pause.
-  bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
+  bool should_start_concurrent_mark_operation = determine_start_concurrent_mark_gc();
   bool concurrent_operation_is_full_mark = false;
 
-  // Inner scope for scope based logging, timers, and stats collection
-  {
-    GCTraceCPUTime tcpu;
+  // Verification may use the gang workers, so they must be set up before.
+  // Individual parallel phases may override this.
+  set_young_collection_default_active_worker_threads();
 
+  {
+    // Do timing/tracing/statistics/pre- and post-logging/verification work not
+    // directly related to the collection. They should not be accounted for in
+    // collection work timing.
+
+    // The G1YoungGCTraceTime message depends on collector state, so must come after
+    // determining collector state.
     G1YoungGCTraceTime tm(gc_cause());
 
-    uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
-                                                            workers()->active_workers(),
-                                                            Threads::number_of_non_daemon_threads());
-    active_workers = workers()->update_active_workers(active_workers);
-    log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
+    // Young GC internal timing
+    policy()->note_gc_start();
 
     // JFR
     G1YoungGCJFRTracerMark jtm(_gc_timer_stw, _gc_tracer_stw, gc_cause());
     // JStat/MXBeans
@@ -3050,75 +3064,53 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
     G1HeapPrinterMark hpm(this);
 
+    // Wait for root region scan here to make sure that it is done before any
+    // use of the STW work gang to maximize cpu use (i.e. all cores are available
+    // just to do that).
+    wait_for_root_region_scanning();
-
-    G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
-    verify_before_young_collection(verify_type);
 
     {
-      IsGCActiveMark x;
-      gc_prologue(false);
+      // Actual collection work starts and is executed (only) in this scope.
+
+      G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
+      verify_before_young_collection(verify_type);
+
+      {
-      // The elapsed time induced by the start time below deliberately elides
-      // the possible verification above.
-      double sample_start_time_sec = os::elapsedTime();
+        // The elapsed time induced by the start time below deliberately elides
+        // the possible verification above.
+        double sample_start_time_sec = os::elapsedTime();
-      policy()->record_collection_pause_start(sample_start_time_sec);
-
-      // Please see comment in g1CollectedHeap.hpp and
-      // G1CollectedHeap::ref_processing_init() to see how
-      // reference processing currently works in G1.
-      _ref_processor_stw->start_discovery(false /* always_clear */);
-
-      calculate_collection_set(jtm.evacuation_info(), target_pause_time_ms);
+        policy()->record_collection_pause_start(sample_start_time_sec);
+
-      G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
-      G1ParScanThreadStateSet per_thread_states(this,
-                                                &rdcqs,
-                                                workers()->active_workers(),
-                                                collection_set()->young_region_length(),
-                                                collection_set()->optional_region_length());
-      pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
-
-      // Forget the current allocation region (we might even choose it to be part
-      // of the collection set!).
-      _allocator->release_mutator_alloc_regions();
-
-      bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
-      // Actually do the work...
-      evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
+        calculate_collection_set(jtm.evacuation_info(), target_pause_time_ms);
+
+        G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
+        G1ParScanThreadStateSet per_thread_states(this,
+                                                  &rdcqs,
+                                                  workers()->active_workers(),
+                                                  collection_set()->young_region_length(),
+                                                  collection_set()->optional_region_length());
+        pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
+
+        bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
+        // Actually do the work...
+        evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
-
-      if (may_do_optional_evacuation) {
-        evacuate_optional_collection_set(&per_thread_states);
-      }
-      post_evacuate_collection_set(jtm.evacuation_info(), &rdcqs, &per_thread_states);
-
-      start_new_collection_set();
-
-      _survivor_evac_stats.adjust_desired_plab_sz();
-      _old_evac_stats.adjust_desired_plab_sz();
-
-      allocate_dummy_regions();
-
-      _allocator->init_mutator_alloc_regions();
-
-      expand_heap_after_young_collection();
-
-      // Refine the type of a concurrent mark operation now that we did the
-      // evacuation, eventually aborting it.
-      concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
-
-      // Need to report the collection pause now since record_collection_pause_end()
-      // modifies it to the next state.
-      jtm.report_pause_type(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
-
-      double sample_end_time_sec = os::elapsedTime();
-      double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-      policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_full_mark);
+
+        if (may_do_optional_evacuation) {
+          evacuate_optional_collection_set(&per_thread_states);
+        }
+        post_evacuate_collection_set(jtm.evacuation_info(), &rdcqs, &per_thread_states);
+
+        verify_after_young_collection(verify_type);
+
+        // Refine the type of a concurrent mark operation now that we did the
+        // evacuation, eventually aborting it.
+        concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
-
-      gc_epilogue(false);
+
+        // Need to report the collection pause now since record_collection_pause_end()
+        // modifies it to the next state.
+        jtm.report_pause_type(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
+
+        double sample_end_time_sec = os::elapsedTime();
+        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
+        policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_full_mark);
+      }
     }
 
-    verify_after_young_collection(verify_type);
-
     policy()->print_phases();
@@ -3128,7 +3120,6 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
   // It should now be safe to tell the concurrent mark thread to start
   // without its logging output interfering with the logging output
   // that came from the pause.
   if (should_start_concurrent_mark_operation) {
     // CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
     // thread(s) could be running concurrently with us. Make sure that anything
@@ -3555,11 +3546,33 @@ public:
 };
 
 void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo* evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+  // Please see comment in g1CollectedHeap.hpp and
+  // G1CollectedHeap::ref_processing_init() to see how
+  // reference processing currently works in G1.
+  _ref_processor_stw->start_discovery(false /* always_clear */);
+
   _bytes_used_during_gc = 0;
 
   _expand_heap_after_alloc_failure = true;
   Atomic::store(&_num_regions_failed_evacuation, 0u);
 
+  gc_prologue(false);
+
+  {
+    Ticks start = Ticks::now();
+    retire_tlabs();
+    phase_times()->record_prepare_tlab_time_ms((Ticks::now() - start).seconds() * 1000.0);
+  }
+
+  {
+    // Flush dirty card queues to qset, so later phases don't need to account
+    // for partially filled per-thread queues and such.
+    Ticks start = Ticks::now();
+    G1BarrierSet::dirty_card_queue_set().concatenate_logs();
+    Tickspan dt = Ticks::now() - start;
+    phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
+  }
+
   _regions_failed_evacuation.clear();
 
   // Disable the hot card cache.
@@ -3853,6 +3866,14 @@ void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo* evacuation_info, G1RedirtyCardsQueueSet* rdcqs, G1ParScanThreadStateSet* per_thread_states) {
   evacuation_info->set_collectionset_used_before(collection_set()->bytes_used_before());
   evacuation_info->set_bytes_used(_bytes_used_during_gc);
+
+  start_new_collection_set();
+
+  prepare_tlabs_for_mutator();
+
+  gc_epilogue(false);
+
+  expand_heap_after_young_collection();
 }
 
 void G1CollectedHeap::record_obj_copy_mem_stats() {
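The pre_evacuate_collection_set() hunk above wraps each prepare step (retire_tlabs(), concatenate_logs()) in the same stopwatch pattern: take Ticks::now() before and after, then record the difference in milliseconds. Ticks and Tickspan are HotSpot-internal types; the sketch below shows the same idiom, self-contained and runnable, with std::chrono as a stand-in and a hypothetical recorder function:

#include <chrono>
#include <cstdio>

// Stand-in for phase_times()->record_prepare_tlab_time_ms(...); hypothetical.
static void record_prepare_tlab_time_ms(double ms) {
  std::printf("Prepare TLABs: %.3f ms\n", ms);
}

// Stand-in for retire_tlabs() / ensure_parsability(true).
static void retire_tlabs() {}

int main() {
  auto start = std::chrono::steady_clock::now();   // like Ticks::now()
  retire_tlabs();                                  // the timed phase
  std::chrono::duration<double, std::milli> dt =   // like Tickspan
      std::chrono::steady_clock::now() - start;
  record_prepare_tlab_time_ms(dt.count());         // report in milliseconds
  return 0;
}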

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -800,6 +800,14 @@ private:
   // of the incremental collection pause, executed by the vm thread.
   void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);
 
+  void set_young_collection_default_active_worker_threads();
+
+  bool determine_start_concurrent_mark_gc();
+
+  void prepare_tlabs_for_mutator();
+
+  void retire_tlabs();
+
   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
@@ -1451,7 +1459,6 @@ private:
   void print_regions_on(outputStream* st) const;
 
 public:
   virtual void print_on(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;

src/hotspot/share/gc/g1/g1FullCollector.cpp

@@ -174,6 +174,7 @@ void G1FullCollector::prepare_collection() {
   _heap->verify_before_full_collection(scope()->is_explicit_gc());
 
   _heap->gc_prologue(true);
+  _heap->retire_tlabs();
   _heap->prepare_heap_for_full_collection();
 
   PrepareRegionsClosure cl(this);
@@ -213,12 +214,12 @@ void G1FullCollector::complete_collection() {
   _heap->prepare_heap_for_mutators();
 
+  _heap->resize_all_tlabs();
+
   _heap->policy()->record_full_collection_end();
   _heap->gc_epilogue(true);
 
   _heap->verify_after_full_collection();
 
   _heap->print_heap_after_full_collection();
 }
 
 void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
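The g1FullCollector.cpp hunks above make the TLAB bracket explicit: the full collector now retires TLABs itself right after gc_prologue(true) and resizes them in complete_collection(), instead of relying on gc_prologue()/gc_epilogue() to do both implicitly. A minimal, runnable sketch of that ordering, using stand-in types rather than JDK code:

#include <cstdio>

// Stand-in heap type; the method names mirror the G1CollectedHeap calls in
// the diff, but none of this is JDK code.
struct HeapStub {
  void gc_prologue(bool full)  { std::printf("gc_prologue(full=%d)\n", full); }
  void retire_tlabs()          { std::printf("retire TLABs (make heap parsable)\n"); }
  void resize_all_tlabs()      { std::printf("resize TLABs to new desired sizes\n"); }
  void gc_epilogue(bool full)  { std::printf("gc_epilogue(full=%d)\n", full); }
};

int main() {
  HeapStub heap;
  heap.gc_prologue(true);    // bookkeeping only; TLAB work is no longer hidden here
  heap.retire_tlabs();       // explicit, as in G1FullCollector::prepare_collection()
  // ... collection work elided ...
  heap.resize_all_tlabs();   // explicit, as in G1FullCollector::complete_collection()
  heap.gc_epilogue(true);
  return 0;
}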