8129549: G1: Make sure the concurrent thread does not mix its logging with the STW pauses

Reviewed-by: pliden, tschatzl
Bengt Rutisson 2015-06-23 13:26:05 +02:00
parent 8a03d57f9b
commit 7f2ffe8c0e
5 changed files with 60 additions and 66 deletions
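The core of the change is a new ConcurrentMarkThread::cm_log() helper (see concurrentMarkThread.cpp below) that replaces the ad-hoc "if (G1Log::fine()) { gclog_or_tty->... }" blocks: before printing, the concurrent thread joins the suspendible thread set (STS), so each of its log lines is emitted entirely before or entirely after a safepoint and cannot interleave with the STW pause output. The following is a minimal standalone sketch of that pattern, not HotSpot code; a plain mutex stands in for the STS/safepoint machinery, and sts_model and cm_log_sketch are invented names.

// Standalone sketch: serialize concurrent-thread log lines against "STW" logging.
// sts_model stands in for HotSpot's SuspendibleThreadSet; a real STW pause would
// hold the equivalent exclusion for its whole duration, not just for one log call.
#include <cstdarg>
#include <cstdio>
#include <mutex>

static std::mutex sts_model;   // stand-in for the suspendible thread set / safepoint

// Mirrors the shape of ConcurrentMarkThread::cm_log(): only log when 'doit' is set,
// and optionally "join the STS" (here: take the lock) so the line cannot interleave
// with a pause's output.
static void cm_log_sketch(bool doit, bool join_sts, const char* fmt, ...) {
  if (!doit) {
    return;
  }
  std::unique_lock<std::mutex> joiner(sts_model, std::defer_lock);
  if (join_sts) {
    joiner.lock();             // analogous to SuspendibleThreadSetJoiner(join_sts)
  }
  va_list args;
  va_start(args, fmt);
  std::vprintf(fmt, args);
  std::printf("\n");
  va_end(args);
}

int main() {
  cm_log_sketch(true, true,  "[GC concurrent-mark-start]");
  // join_sts = false is for callers that are already inside an STS-joined scope,
  // as with the concurrent-mark-abort message in the patch.
  cm_log_sketch(true, false, "[GC concurrent-mark-abort]");
  return 0;
}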

concurrentMark.cpp

@@ -1179,6 +1179,8 @@ public:
};
void ConcurrentMark::scanRootRegions() {
double scan_start = os::elapsedTime();
// Start of concurrent marking.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -1186,6 +1188,11 @@ void ConcurrentMark::scanRootRegions() {
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
if (root_regions()->scan_in_progress()) {
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
}
_parallel_marking_threads = calc_parallel_marking_threads();
assert(parallel_marking_threads() <= max_parallel_marking_threads(),
"Maximum number of marking threads exceeded");
@@ -1195,6 +1202,11 @@ void ConcurrentMark::scanRootRegions() {
_parallel_workers->set_active_workers(active_workers);
_parallel_workers->run_task(&task);
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
}
// It's possible that has_aborted() is true here without actually
// aborting the survivor scan earlier. This is OK as it's
// mainly used for sanity checking.
@@ -2993,6 +3005,11 @@ void ConcurrentMark::print_stats() {
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
if (!cmThread()->during_cycle() || _has_aborted) {
// We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
return;
}
// Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
// concurrent bitmap clearing.
_nextMarkBitMap->clearAll();
@@ -3010,12 +3027,8 @@ void ConcurrentMark::abort() {
}
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
if (!gc_id.is_undefined()) {
// We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
// to detect that it was aborted. Only keep track of the first GC id that we aborted.
_aborted_gc_id = gc_id;
}
_aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
_has_aborted = true;
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

concurrentMarkThread.cpp

@@ -78,7 +78,19 @@ public:
}
};
// We want to avoid that the logging from the concurrent thread is mixed
// with the logging from a STW GC. So, if necessary join the STS to ensure
// that the logging is done either before or after the STW logging.
void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
if (doit) {
SuspendibleThreadSetJoiner sts_joiner(join_sts);
va_list args;
va_start(args, fmt);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->vprint_cr(fmt, args);
va_end(args);
}
}
void ConcurrentMarkThread::run() {
initialize_in_thread();
@@ -110,28 +122,12 @@ void ConcurrentMarkThread::run() {
// without the root regions have been scanned which would be a
// correctness issue.
double scan_start = os::elapsedTime();
if (!cm()->has_aborted()) {
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
}
_cm->scanRootRegions();
double scan_end = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
scan_end - scan_start);
}
}
double mark_start_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-start]");
}
cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");
int iter = 0;
do {
@@ -151,25 +147,15 @@ void ConcurrentMarkThread::run() {
os::sleep(current_thread, sleep_time_ms, false);
}
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
mark_end_sec - mark_start_sec);
}
cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
CMCheckpointRootsFinalClosure final_cl(_cm);
VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
VMThread::execute(&op);
}
if (cm()->restart_for_overflow()) {
if (G1TraceMarkStackOverflow) {
gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
"in remark (restart #%d).", iter);
}
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
}
cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
}
} while (cm()->restart_for_overflow());
@@ -209,10 +195,7 @@ void ConcurrentMarkThread::run() {
// reclaimed by cleanup.
double cleanup_start_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
}
cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-start]");
// Now do the concurrent cleanup operation.
_cm->completeCleanup();
@@ -229,11 +212,7 @@ void ConcurrentMarkThread::run() {
g1h->reset_free_regions_coming();
double cleanup_end_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
cleanup_end_sec - cleanup_start_sec);
}
cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
}
guarantee(cm()->cleanup_list_is_empty(),
"at this point there should be no regions on the cleanup list");
@@ -266,13 +245,8 @@ void ConcurrentMarkThread::run() {
SuspendibleThreadSetJoiner sts_join;
if (!cm()->has_aborted()) {
g1_policy->record_concurrent_mark_cleanup_completed();
}
}
if (cm()->has_aborted()) {
if (G1Log::fine()) {
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
} else {
cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
}
}
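For context, the cm_log() calls above sit in a do/while loop that retries marking while the remark pause reports a mark-stack overflow, with iter counting the restarts for the G1TraceMarkStackOverflow message. A rough standalone sketch of that control flow, with the remark pause reduced to a counter of forced overflows (all names invented for illustration):

// Illustrative only: the remark pause is modeled by a counter of forced overflows.
#include <cstdio>

struct MarkingSketch {
  int forced_overflows;   // how many remark pauses will report a mark-stack overflow
  bool restart;           // set by the last remark pause

  void mark_from_roots() { /* concurrent marking would run here */ }
  void remark_pause()    { restart = (forced_overflows-- > 0); }
  bool restart_for_overflow() const { return restart; }
};

int main() {
  MarkingSketch cm{2, false};
  int iter = 0;
  do {
    iter++;
    cm.mark_from_roots();
    cm.remark_pause();   // stands in for VM_CGC_Operation(&final_cl, "GC remark", ...)
    if (cm.restart_for_overflow()) {
      std::printf("Restarting conc marking because of MS overflow in remark (restart #%d).\n", iter);
    }
  } while (cm.restart_for_overflow());
  return 0;
}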

concurrentMarkThread.hpp

@@ -40,6 +40,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double _vtime_accum; // Accumulated virtual time.
double _vtime_mark_accum;
void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5);
public:
virtual void run();
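ATTRIBUTE_PRINTF(4, 5) on the new declaration maps, on GCC/Clang builds, to the format(printf, ...) function attribute. Because the implicit this pointer of a non-static member function counts as argument 1, the format string is argument 4 and the checked variadic arguments start at 5. A small sketch of the same indexing outside HotSpot, using the compiler attribute directly behind an invented DEMO_PRINTF macro:

// Illustrative only: shows why the indices are 4 and 5 for a member function
// like cm_log(bool, bool, const char*, ...).
#include <cstdarg>
#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define DEMO_PRINTF(fmt_idx, args_idx) __attribute__((format(printf, fmt_idx, args_idx)))
#else
#define DEMO_PRINTF(fmt_idx, args_idx)
#endif

class LoggerSketch {
public:
  // As the compiler counts arguments on a non-static member function:
  // 1 = this, 2 = doit, 3 = join_sts, 4 = fmt, 5... = variadic args checked against fmt.
  void log(bool doit, bool join_sts, const char* fmt, ...) DEMO_PRINTF(4, 5);
};

void LoggerSketch::log(bool doit, bool join_sts, const char* fmt, ...) {
  if (!doit) return;
  (void)join_sts;              // the STS join is omitted in this sketch
  va_list args;
  va_start(args, fmt);
  std::vprintf(fmt, args);
  std::printf("\n");
  va_end(args);
}

int main() {
  LoggerSketch logger;
  logger.log(true, true, "[GC concurrent-mark-end, %1.7lf secs]", 0.0123456);
  // logger.log(true, true, "%d", "oops");   // would draw a -Wformat warning
  return 0;
}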

g1CollectedHeap.cpp

@@ -3867,6 +3867,21 @@ void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
gclog_or_tty->flush();
}
void G1CollectedHeap::wait_for_root_region_scanning() {
double scan_wait_start = os::elapsedTime();
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = _cm->root_regions()->wait_until_scan_finished();
double wait_time_ms = 0.0;
if (waited) {
double scan_wait_end = os::elapsedTime();
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
}
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */);
@@ -3883,6 +3898,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
wait_for_root_region_scanning();
G1Log::update_level();
print_heap_before_gc();
trace_heap_before_gc(_gc_tracer_stw);
@@ -4004,19 +4021,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->record_collection_pause_start(sample_start_time_sec);
double scan_wait_start = os::elapsedTime();
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = _cm->root_regions()->wait_until_scan_finished();
double wait_time_ms = 0.0;
if (waited) {
double scan_wait_end = os::elapsedTime();
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
}
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
_young_list->print();
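The pause now calls wait_for_root_region_scanning() at its very start, before G1Log::update_level() and print_heap_before_gc(), instead of waiting in the middle of the pause; the time spent waiting is still reported through record_root_region_scan_wait_time(). Moving the wait ahead of the pause's own output is part of keeping the concurrent root-region-scan log lines (now printed from ConcurrentMark::scanRootRegions() itself) separate from the STW log. Below is a minimal standalone sketch of that wait-and-measure shape, with a condition variable standing in for root_regions()->wait_until_scan_finished(); all names are invented for illustration.

// Illustrative wait-and-measure shape: block until a concurrent phase finishes,
// and record how long the caller actually had to wait (0.0 if it was already done).
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct RootScanModel {
  std::mutex lock;
  std::condition_variable done_cv;
  bool scan_in_progress = false;

  // Returns true if the caller had to wait, mirroring wait_until_scan_finished().
  bool wait_until_scan_finished() {
    std::unique_lock<std::mutex> ul(lock);
    if (!scan_in_progress) {
      return false;
    }
    done_cv.wait(ul, [this] { return !scan_in_progress; });
    return true;
  }

  void finish_scan() {
    {
      std::lock_guard<std::mutex> lg(lock);
      scan_in_progress = false;
    }
    done_cv.notify_all();
  }
};

int main() {
  RootScanModel scan;
  scan.scan_in_progress = true;

  std::thread concurrent([&scan] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));   // "root region scan"
    scan.finish_scan();
  });

  double wait_time_ms = 0.0;
  auto scan_wait_start = std::chrono::steady_clock::now();
  if (scan.wait_until_scan_finished()) {
    auto scan_wait_end = std::chrono::steady_clock::now();
    wait_time_ms = std::chrono::duration<double, std::milli>(scan_wait_end - scan_wait_start).count();
  }
  std::printf("root region scan wait: %1.3f ms\n", wait_time_ms);  // cf. record_root_region_scan_wait_time()

  concurrent.join();
  return 0;
}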

g1CollectedHeap.hpp

@@ -805,6 +805,8 @@ protected:
bool* succeeded,
GCCause::Cause gc_cause);
void wait_for_root_region_scanning();
// The guts of the incremental collection pause, executed by the vm
// thread. It returns false if it is unable to do the collection due
// to the GC locker being active, true otherwise