8150068: Log the main G1 phases at info level

Reviewed-by: sjohanss, tschatzl
Bengt Rutisson 2016-02-29 13:06:03 +01:00
parent 234373ff31
commit 190c092900
10 changed files with 295 additions and 394 deletions
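For context when reading the diffs below: the per-phase output reworked here is emitted through the unified logging tag sets gc+phases (summaries) and gc+phases+task (per-worker detail). A minimal sketch of how the different levels can be selected on the command line — the -Xlog selectors are standard JDK 9 unified-logging syntax rather than part of this change, and the application arguments are placeholders:

    java -XX:+UseG1GC -Xlog:gc+phases=info ...        # main G1 phases only
    java -XX:+UseG1GC -Xlog:gc+phases=debug ...        # adds per-phase Min/Avg/Max/Sum summaries
    java -XX:+UseG1GC -Xlog:gc+phases+task=trace ...   # adds per-worker values (gc, phases, task)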

View File

@@ -1117,14 +1117,15 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
   _short_lived_surv_rate_group->start_adding_regions();
   // Do that for any other surv rate groups
+  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
   if (update_stats) {
     double cost_per_card_ms = 0.0;
-    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
     if (_pending_cards > 0) {
-      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
+      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
-    _cost_scan_hcc_seq->add(cost_scan_hcc);
+    _cost_scan_hcc_seq->add(scan_hcc_time_ms);
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
@@ -1214,8 +1215,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
-  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",

View File

@@ -28,109 +28,70 @@
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/workerDataArray.inline.hpp"
-#include "memory/allocation.hpp"
+#include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/os.hpp"
-// Helper class for avoiding interleaved logging
-class LineBuffer: public StackObj {
-private:
-  static const int BUFFER_LEN = 1024;
-  static const int INDENT_CHARS = 3;
-  char _buffer[BUFFER_LEN];
-  int _indent_level;
-  int _cur;
-  void vappend(const char* format, va_list ap) ATTRIBUTE_PRINTF(2, 0) {
-    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
-    if (res != -1) {
-      _cur += res;
-    } else {
-      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
-      _buffer[BUFFER_LEN -1] = 0;
-      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
-    }
-  }
-public:
-  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
-    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
-      _buffer[_cur] = ' ';
-    }
-  }
-#ifndef PRODUCT
-  ~LineBuffer() {
-    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
-  }
-#endif
-  void append(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
-    va_list ap;
-    va_start(ap, format);
-    vappend(format, ap);
-    va_end(ap);
-  }
-  const char* to_string() {
-    _cur = _indent_level * INDENT_CHARS;
-    return _buffer;
-  }
-};
-static const char* Indents[4] = {"", "   ", "      ", "         "};
+static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
-  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start:", false, 2);
-  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning:", true, 2);
+  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
+  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");
   // Root scanning phases
-  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots:", true, 3);
-  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots:", true, 3);
-  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots:", true, 3);
-  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots:", true, 3);
-  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots:", true, 3);
-  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots:", true, 3);
-  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots:", true, 3);
-  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots:", true, 3);
-  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots:", true, 3);
-  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots:", true, 3);
-  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots:", true, 3);
-  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD:", true, 3);
-  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots:", true, 3);
-  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering:", true, 3);
+  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
+  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms):");
+  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
+  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
+  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
+  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms):");
+  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
+  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
+  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
+  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
+  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
+  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
+  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
+  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms):");
-  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS:", true, 2);
-  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC:", true, 3);
-  _gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
-  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS:", true, 2);
-  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning:", true, 2);
-  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy:", true, 2);
-  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination:", true, 2);
-  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total:", true, 2);
-  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End:", false, 2);
-  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other:", true, 2);
+  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms):");
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
+    _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms):");
+  } else {
+    _gc_par_phases[ScanHCC] = NULL;
+  }
+  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
+  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
+  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
+  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
+  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
+  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
+  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
-  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:", true, 3);
+  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
   _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
-  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:", true, 3);
+  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
   _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
-  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
-  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);
+  if (UseStringDeduplication) {
+    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
+    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
+  } else {
+    _gc_par_phases[StringDedupQueueFixup] = NULL;
+    _gc_par_phases[StringDedupTableFixup] = NULL;
+  }
-  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty:", true, 3);
-  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
+  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
+  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
   _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
-  _gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs:", true, 3);
+  _gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
 }
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@@ -142,11 +103,10 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
   _external_accounted_time_ms = 0.0;
   for (int i = 0; i < GCParPhasesSentinel; i++) {
-    _gc_par_phases[i]->reset();
+    if (_gc_par_phases[i] != NULL) {
+      _gc_par_phases[i]->reset();
+    }
   }
-  _gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
-  _gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
 }
 void G1GCPhaseTimes::note_gc_end() {
@@ -168,43 +128,10 @@ void G1GCPhaseTimes::note_gc_end() {
   }
   for (int i = 0; i < GCParPhasesSentinel; i++) {
-    _gc_par_phases[i]->verify(_active_gc_threads);
-  }
-}
+    if (_gc_par_phases[i] != NULL) {
+      _gc_par_phases[i]->verify(_active_gc_threads);
+    }
+  }
+}
-void G1GCPhaseTimes::print_stats(const char* indent, const char* str, double value) {
-  log_debug(gc, phases)("%s%s: %.1lf ms", indent, str, value);
-}
-double G1GCPhaseTimes::accounted_time_ms() {
-  // First subtract any externally accounted time
-  double misc_time_ms = _external_accounted_time_ms;
-  // Subtract the root region scanning wait time. It's initialized to
-  // zero at the start of the pause.
-  misc_time_ms += _root_region_scan_wait_time_ms;
-  misc_time_ms += _cur_collection_par_time_ms;
-  // Now subtract the time taken to fix up roots in generated code
-  misc_time_ms += _cur_collection_code_root_fixup_time_ms;
-  // Strong code root purge time
-  misc_time_ms += _cur_strong_code_root_purge_time_ms;
-  if (G1StringDedup::is_enabled()) {
-    // String dedup fixup time
-    misc_time_ms += _cur_string_dedup_fixup_time_ms;
-  }
-  // Subtract the time taken to clean the card table from the
-  // current value of "other time"
-  misc_time_ms += _cur_clear_ct_time_ms;
-  // Remove expand heap time from "other time"
-  misc_time_ms += _cur_expand_heap_time_ms;
-  return misc_time_ms;
-}
 // record the time a phase took in seconds
@@ -226,196 +153,144 @@ double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
   return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;
 }
-double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
-  return _gc_par_phases[phase]->get(worker_i) * 1000.0;
-}
-double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;
-}
-double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;
-}
-double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
-  return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;
-}
-size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
-}
 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
   return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);
 }
-double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);
-}
-size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);
-}
-size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
-  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
-  return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);
-}
+template <class T>
+void G1GCPhaseTimes::details(T* phase, const char* indent) {
+  LogHandle(gc, phases, task) log;
+  if (log.is_level(LogLevel::Trace)) {
+    outputStream* trace_out = log.trace_stream();
+    trace_out->print("%s", indent);
+    phase->print_details_on(trace_out, _active_gc_threads);
+  }
+}
+void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) {
+  out->print("%s", Indents[indent]);
+  phase->print_summary_on(out, _active_gc_threads, print_sum);
+  details(phase, Indents[indent]);
+  WorkerDataArray<size_t>* work_items = phase->thread_work_items();
+  if (work_items != NULL) {
+    out->print("%s", Indents[indent + 1]);
+    work_items->print_summary_on(out, _active_gc_threads, true);
+    details(work_items, Indents[indent + 1]);
+  }
+}
+void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) {
+  LogHandle(gc, phases) log;
+  if (log.is_level(LogLevel::Debug)) {
+    ResourceMark rm;
+    log_phase(phase, 2, log.debug_stream(), true);
+  }
+}
-class G1GCParPhasePrinter : public StackObj {
-  G1GCPhaseTimes* _phase_times;
- public:
-  G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
-  void print(G1GCPhaseTimes::GCParPhases phase_id) {
-    WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
-    if (phase->_length == 1) {
-      print_single_length(phase_id, phase);
-    } else {
-      print_multi_length(phase_id, phase);
-    }
-  }
- private:
-  void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    // No need for min, max, average and sum for only one worker
-    log_debug(gc, phases)("%s%s: %.1lf", Indents[phase->_indent_level], phase->_title, _phase_times->get_time_ms(phase_id, 0));
-    WorkerDataArray<size_t>* work_items = phase->_thread_work_items;
-    if (work_items != NULL) {
-      log_debug(gc, phases)("%s%s: " SIZE_FORMAT, Indents[work_items->_indent_level], work_items->_title, _phase_times->sum_thread_work_items(phase_id));
-    }
-  }
-  void print_time_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id) {
-    if (log_is_enabled(Trace, gc)) {
-      LineBuffer buf(0);
-      uint active_length = _phase_times->_active_gc_threads;
-      for (uint i = 0; i < active_length; ++i) {
-        buf.append(" %4.1lf", _phase_times->get_time_ms(phase_id, i));
-      }
-      const char* line = buf.to_string();
-      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
-    }
-  }
-  void print_count_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    if (log_is_enabled(Trace, gc)) {
-      LineBuffer buf(0);
-      uint active_length = _phase_times->_active_gc_threads;
-      for (uint i = 0; i < active_length; ++i) {
-        buf.append(" " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
-      }
-      const char* line = buf.to_string();
-      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
-    }
-  }
-  void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    const char* indent = Indents[thread_work_items->_indent_level];
-    assert(thread_work_items->_print_sum, "%s does not have print sum true even though it is a count", thread_work_items->_title);
-    log_debug(gc, phases)("%s%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT,
-        indent, thread_work_items->_title,
-        _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
-        _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
-    print_count_values(indent, phase_id, thread_work_items);
-  }
-  void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    const char* indent = Indents[phase->_indent_level];
-    if (phase->_print_sum) {
-      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf, Sum: %4.1lf",
-          indent, phase->_title,
-          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
-          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id), _phase_times->sum_time_ms(phase_id));
-    } else {
-      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf",
-          indent, phase->_title,
-          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
-          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
-    }
-    print_time_values(indent, phase_id);
-    if (phase->_thread_work_items != NULL) {
-      print_thread_work_items(phase_id, phase->_thread_work_items);
-    }
-  }
-};
+void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) {
+  LogHandle(gc, phases) log;
+  if (log.is_level(LogLevel::Trace)) {
+    ResourceMark rm;
+    log_phase(phase, 3, log.trace_stream(), print_sum);
+  }
+}
+#define PHASE_DOUBLE_FORMAT "%s%s: %.1lfms"
+#define PHASE_SIZE_FORMAT "%s%s: " SIZE_FORMAT
+#define info_line(str, value) \
+  log_info(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[1], str, value);
+#define debug_line(str, value) \
+  log_debug(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[2], str, value);
+#define trace_line(str, value) \
+  log_trace(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[3], str, value);
+#define trace_line_sz(str, value) \
+  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);
+#define trace_line_ms(str, value) \
+  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);
+#define info_line_and_account(str, value) \
+  info_line(str, value); \
+  accounted_time_ms += value;
 void G1GCPhaseTimes::print() {
   note_gc_end();
-  G1GCParPhasePrinter par_phase_printer(this);
+  double accounted_time_ms = _external_accounted_time_ms;
   if (_root_region_scan_wait_time_ms > 0.0) {
-    print_stats(Indents[1], "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+    info_line_and_account("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
-  print_stats(Indents[1], "Parallel Time", _cur_collection_par_time_ms);
-  for (int i = 0; i <= GCMainParPhasesLast; i++) {
-    par_phase_printer.print((GCParPhases) i);
-  }
-  print_stats(Indents[1], "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-  print_stats(Indents[1], "Code Root Purge", _cur_strong_code_root_purge_time_ms);
+  info_line_and_account("Evacuate Collection Set", _cur_collection_par_time_ms);
+  trace_phase(_gc_par_phases[GCWorkerStart], false);
+  debug_phase(_gc_par_phases[ExtRootScan]);
+  for (int i = ThreadRoots; i <= SATBFiltering; i++) {
+    trace_phase(_gc_par_phases[i]);
+  }
+  debug_phase(_gc_par_phases[UpdateRS]);
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
+    trace_phase(_gc_par_phases[ScanHCC]);
+  }
+  debug_phase(_gc_par_phases[ScanRS]);
+  debug_phase(_gc_par_phases[CodeRoots]);
+  debug_phase(_gc_par_phases[ObjCopy]);
+  debug_phase(_gc_par_phases[Termination]);
+  debug_phase(_gc_par_phases[Other]);
+  debug_phase(_gc_par_phases[GCWorkerTotal]);
+  trace_phase(_gc_par_phases[GCWorkerEnd], false);
+  info_line_and_account("Code Roots", _cur_collection_code_root_fixup_time_ms + _cur_strong_code_root_purge_time_ms);
+  debug_line("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
+  debug_line("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
   if (G1StringDedup::is_enabled()) {
-    print_stats(Indents[1], "String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
-    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
-      par_phase_printer.print((GCParPhases) i);
-    }
+    info_line_and_account("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
+    debug_phase(_gc_par_phases[StringDedupQueueFixup]);
+    debug_phase(_gc_par_phases[StringDedupTableFixup]);
   }
-  print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
-  print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
-  double misc_time_ms = _gc_pause_time_ms - accounted_time_ms();
-  print_stats(Indents[1], "Other", misc_time_ms);
+  info_line_and_account("Clear Card Table", _cur_clear_ct_time_ms);
+  info_line_and_account("Expand Heap After Collection", _cur_expand_heap_time_ms);
+  double free_cset_time = _recorded_young_free_cset_time_ms + _recorded_non_young_free_cset_time_ms;
+  info_line_and_account("Free Collection Set", free_cset_time);
+  debug_line("Young Free Collection Set", _recorded_young_free_cset_time_ms);
+  debug_line("Non-Young Free Collection Set", _recorded_non_young_free_cset_time_ms);
+  info_line_and_account("Merge Per-Thread State", _recorded_merge_pss_time_ms);
+  info_line("Other", _gc_pause_time_ms - accounted_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
-    print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
+    debug_line("Verify Before", _cur_verify_before_time_ms);
   }
   if (G1CollectedHeap::heap()->evacuation_failed()) {
     double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
                                 _cur_evac_fail_restore_remsets;
-    print_stats(Indents[2], "Evacuation Failure", evac_fail_handling);
-    log_trace(gc, phases)("%sRecalculate Used: %.1lf ms", Indents[3], _cur_evac_fail_recalc_used);
-    log_trace(gc, phases)("%sRemove Self Forwards: %.1lf ms", Indents[3], _cur_evac_fail_remove_self_forwards);
-    log_trace(gc, phases)("%sRestore RemSet: %.1lf ms", Indents[3], _cur_evac_fail_restore_remsets);
+    debug_line("Evacuation Failure", evac_fail_handling);
+    trace_line("Recalculate Used", _cur_evac_fail_recalc_used);
+    trace_line("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
+    trace_line("Restore RemSet", _cur_evac_fail_restore_remsets);
   }
-  print_stats(Indents[2], "Choose CSet",
-              (_recorded_young_cset_choice_time_ms +
-               _recorded_non_young_cset_choice_time_ms));
-  print_stats(Indents[2], "Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
-  print_stats(Indents[2], "Ref Proc", _cur_ref_proc_time_ms);
-  print_stats(Indents[2], "Ref Enq", _cur_ref_enq_time_ms);
-  print_stats(Indents[2], "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
-  par_phase_printer.print(RedirtyCards);
-  par_phase_printer.print(PreserveCMReferents);
+  debug_line("Choose CSet", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
+  debug_line("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
+  debug_line("Ref Proc", _cur_ref_proc_time_ms);
+  debug_line("Ref Enq", _cur_ref_enq_time_ms);
+  debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  trace_phase(_gc_par_phases[RedirtyCards]);
+  trace_phase(_gc_par_phases[PreserveCMReferents]);
   if (G1EagerReclaimHumongousObjects) {
-    print_stats(Indents[2], "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
-    log_trace(gc, phases)("%sHumongous Total: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_total);
-    log_trace(gc, phases)("%sHumongous Candidate: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_candidates);
-    print_stats(Indents[2], "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
-    log_trace(gc, phases)("%sHumongous Reclaimed: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_reclaimed);
+    debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
+    trace_line_sz("Humongous Total", _cur_fast_reclaim_humongous_total);
+    trace_line_sz("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
+    debug_line("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    trace_line_sz("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
   }
-  print_stats(Indents[2], "Free CSet",
-              (_recorded_young_free_cset_time_ms +
-               _recorded_non_young_free_cset_time_ms));
-  log_trace(gc, phases)("%sYoung Free CSet: %.1lf ms", Indents[3], _recorded_young_free_cset_time_ms);
-  log_trace(gc, phases)("%sNon-Young Free CSet: %.1lf ms", Indents[3], _recorded_non_young_free_cset_time_ms);
-  print_stats(Indents[2], "Merge Per-Thread State", _recorded_merge_pss_time_ms);
   if (_cur_verify_after_time_ms > 0.0) {
-    print_stats(Indents[2], "Verify After", _cur_verify_after_time_ms);
+    debug_line("Verify After", _cur_verify_after_time_ms);
   }
 }
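To make the new print() structure concrete: an info-level line comes from the info_line/info_line_and_account macros, and a debug-level summary line comes from debug_phase() via WDAPrinter. The values and the [uptime][level][tags] decorations below are illustrative assumptions; only the message layout follows from the code above:

    [1.234s][info ][gc,phases] GC(7)   Evacuate Collection Set: 12.3ms
    [1.234s][debug][gc,phases] GC(7)     Ext Root Scanning (ms):   Min:  0.2, Avg:  0.4, Max:  0.7, Diff:  0.5, Sum:  3.1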

View File

@@ -32,8 +32,6 @@ class LineBuffer;
 template <class T> class WorkerDataArray;
 class G1GCPhaseTimes : public CHeapObj<mtGC> {
-  friend class G1GCParPhasePrinter;
   uint _active_gc_threads;
   uint _max_gc_threads;
   jlong _gc_start_counter;
@@ -125,11 +123,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _cur_verify_before_time_ms;
   double _cur_verify_after_time_ms;
-  // Helper methods for detailed logging
-  void print_stats(const char*, const char* str, double value);
   void note_gc_end();
+  template <class T>
+  void details(T* phase, const char* indent);
+  void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum);
+  void debug_phase(WorkerDataArray<double>* phase);
+  void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true);
 public:
   G1GCPhaseTimes(uint max_gc_threads);
   void note_gc_start(uint active_gc_threads);
@@ -148,16 +149,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   size_t sum_thread_work_items(GCParPhases phase);
- private:
-  double get_time_ms(GCParPhases phase, uint worker_i);
-  double sum_time_ms(GCParPhases phase);
-  double min_time_ms(GCParPhases phase);
-  double max_time_ms(GCParPhases phase);
-  size_t get_thread_work_item(GCParPhases phase, uint worker_i);
-  double average_thread_work_items(GCParPhases phase);
-  size_t min_thread_work_items(GCParPhases phase);
-  size_t max_thread_work_items(GCParPhases phase);
 public:
   void record_clear_ct_time(double ms) {
@@ -263,8 +254,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _external_accounted_time_ms += time_ms;
   }
-  double accounted_time_ms();
   double cur_collection_start_sec() {
     return _cur_collection_start_sec;
   }

View File

@@ -81,10 +81,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
 }
 void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
-  if (!default_use_cache()) {
-    assert(_hot_cache == NULL, "Logic");
-    return;
-  }
+  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
   assert(_hot_cache != NULL, "Logic");
   assert(!use_cache(), "cache should be disabled");

View File

@@ -238,7 +238,7 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
   G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
-  {
+  if (ConcurrentG1Refine::hot_card_cache_enabled()) {
     // Apply the closure to the entries of the hot card cache.
     G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
     _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);

View File

@@ -24,18 +24,53 @@
 #include "precompiled.hpp"
 #include "gc/g1/workerDataArray.inline.hpp"
+#include "utilities/ostream.hpp"
+template <>
+void WorkerDataArray<double>::WDAPrinter::summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum) {
+  out->print("%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", title, min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff* MILLIUNITS);
+  if (print_sum) {
+    out->print_cr(", Sum: %4.1lf", sum * MILLIUNITS);
+  } else {
+    out->cr();
+  }
+}
+template <>
+void WorkerDataArray<size_t>::WDAPrinter::summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) {
+  out->print("%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, title, min, avg, max, diff);
+  if (print_sum) {
+    out->print_cr(", Sum: " SIZE_FORMAT, sum);
+  } else {
+    out->cr();
+  }
+}
+template <>
+void WorkerDataArray<double>::WDAPrinter::details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads) {
+  out->print("%-25s", "");
+  for (uint i = 0; i < active_threads; ++i) {
+    out->print(" %4.1lf", phase->get(i) * 1000.0);
+  }
+  out->cr();
+}
+template <>
+void WorkerDataArray<size_t>::WDAPrinter::details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads) {
+  out->print("%-25s", "");
+  for (uint i = 0; i < active_threads; ++i) {
+    out->print(" " SIZE_FORMAT, phase->get(i));
+  }
+  out->cr();
+}
 #ifndef PRODUCT
 void WorkerDataArray_test() {
   const uint length = 3;
   const char* title = "Test array";
-  const bool print_sum = false;
-  const uint indent_level = 2;
-  WorkerDataArray<size_t> array(length, title, print_sum, indent_level);
+  WorkerDataArray<size_t> array(length, title);
   assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match");
-  assert(array.should_print_sum() == print_sum, "Expected should_print_sum to match print_sum");
-  assert(array.indentation() == indent_level, "Expected indentation to match");
   const size_t expected[length] = {5, 3, 7};
   for (uint i = 0; i < length; i++) {
@@ -46,10 +81,7 @@ void WorkerDataArray_test() {
   }
   assert(array.sum(length) == (5 + 3 + 7), "Expected sums to match");
-  assert(array.minimum(length) == 3, "Expected mininum to match");
-  assert(array.maximum(length) == 7, "Expected maximum to match");
-  assert(array.diff(length) == (7 - 3), "Expected diffs to match");
-  assert(array.average(length) == 5, "Expected averages to match");
+  assert(array.average(length) == 5.0, "Expected averages to match");
   for (uint i = 0; i < length; i++) {
     array.add(i, 1);

View File

@@ -22,18 +22,19 @@
  *
  */
+#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
+#define SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
 #include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
+class outputStream;
 template <class T>
 class WorkerDataArray : public CHeapObj<mtGC> {
-  friend class G1GCParPhasePrinter;
   T* _data;
   uint _length;
   const char* _title;
-  bool _print_sum;
-  uint _indent_level;
-  bool _enabled;
   WorkerDataArray<size_t>* _thread_work_items;
@@ -42,11 +43,7 @@ class WorkerDataArray : public CHeapObj<mtGC> {
   void set_all(T value);
 public:
-  WorkerDataArray(uint length,
-                  const char* title,
-                  bool print_sum,
-                  uint indent_level);
+  WorkerDataArray(uint length, const char* title);
   ~WorkerDataArray();
   void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items);
@@ -62,27 +59,30 @@ class WorkerDataArray : public CHeapObj<mtGC> {
   double average(uint active_threads) const;
   T sum(uint active_threads) const;
-  T minimum(uint active_threads) const;
-  T maximum(uint active_threads) const;
-  T diff(uint active_threads) const;
-  uint indentation() const {
-    return _indent_level;
-  }
   const char* title() const {
     return _title;
   }
-  bool should_print_sum() const {
-    return _print_sum;
-  }
   void clear();
-  void set_enabled(bool enabled) {
-    _enabled = enabled;
-  }
   void reset() PRODUCT_RETURN;
   void verify(uint active_threads) const PRODUCT_RETURN;
+ private:
+  class WDAPrinter {
+  public:
+    static void summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum);
+    static void summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum);
+    static void details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads);
+    static void details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads);
+  };
+ public:
+  void print_summary_on(outputStream* out, uint active_threads, bool print_sum = true) const;
+  void print_details_on(outputStream* out, uint active_threads) const;
 };
+#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP

View File

@@ -22,20 +22,18 @@
  *
  */
+#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
+#define SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
 #include "gc/g1/workerDataArray.hpp"
 #include "memory/allocation.inline.hpp"
-#include "utilities/ostream.hpp"
 template <typename T>
-WorkerDataArray<T>::WorkerDataArray(uint length,
-                                    const char* title,
-                                    bool print_sum,
-                                    uint indent_level) :
+WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
  _title(title),
  _length(0),
- _print_sum(print_sum),
- _indent_level(indent_level),
- _thread_work_items(NULL),
- _enabled(true) {
+ _thread_work_items(NULL) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -93,29 +91,6 @@ T WorkerDataArray<T>::sum(uint active_threads) const {
   return s;
 }
-template <typename T>
-T WorkerDataArray<T>::minimum(uint active_threads) const {
-  T min = get(0);
-  for (uint i = 1; i < active_threads; ++i) {
-    min = MIN2(min, get(i));
-  }
-  return min;
-}
-template <typename T>
-T WorkerDataArray<T>::maximum(uint active_threads) const {
-  T max = get(0);
-  for (uint i = 1; i < active_threads; ++i) {
-    max = MAX2(max, get(i));
-  }
-  return max;
-}
-template <typename T>
-T WorkerDataArray<T>::diff(uint active_threads) const {
-  return maximum(active_threads) - minimum(active_threads);
-}
 template <typename T>
 void WorkerDataArray<T>::clear() {
   set_all(0);
@@ -128,6 +103,27 @@ void WorkerDataArray<T>::set_all(T value) {
   }
 }
+template <class T>
+void WorkerDataArray<T>::print_summary_on(outputStream* out, uint active_threads, bool print_sum) const {
+  T max = get(0);
+  T min = max;
+  T sum = 0;
+  for (uint i = 1; i < active_threads; ++i) {
+    T value = get(i);
+    max = MAX2(max, value);
+    min = MIN2(min, value);
+    sum += value;
+  }
+  T diff = max - min;
+  double avg = sum / (double) active_threads;
+  WDAPrinter::summary(out, title(), min, avg, max, diff, sum, print_sum);
+}
+template <class T>
+void WorkerDataArray<T>::print_details_on(outputStream* out, uint active_threads) const {
+  WDAPrinter::details(this, out, active_threads);
+}
 #ifndef PRODUCT
 template <typename T>
 void WorkerDataArray<T>::reset() {
@@ -139,10 +135,6 @@ void WorkerDataArray<T>::reset() {
 template <typename T>
 void WorkerDataArray<T>::verify(uint active_threads) const {
-  if (!_enabled) {
-    return;
-  }
   assert(active_threads <= _length, "Wrong number of active threads");
   for (uint i = 0; i < active_threads; i++) {
     assert(_data[i] != uninitialized(),
@@ -163,3 +155,5 @@ inline double WorkerDataArray<double>::uninitialized() const {
   return -1.0;
 }
 #endif
+#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP

View File

@@ -58,6 +58,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, task)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \

View File

@@ -38,10 +38,24 @@ import jdk.test.lib.OutputAnalyzer;
 public class TestGCLogMessages {
     private enum Level {
-        OFF, DEBUG, TRACE;
-        public boolean lessOrEqualTo(Level other) {
+        OFF(""),
+        INFO("info"),
+        DEBUG("debug"),
+        TRACE("trace");
+        private String logName;
+        Level(String logName) {
+            this.logName = logName;
+        }
+        public boolean lessThan(Level other) {
             return this.compareTo(other) < 0;
         }
+        public String toString() {
+            return logName;
+        }
     }
     private class LogMessageWithLevel {
@@ -56,48 +70,48 @@ public class TestGCLogMessages {
     private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
         // Update RS
-        new LogMessageWithLevel("Scan HCC", Level.DEBUG),
+        new LogMessageWithLevel("Scan HCC", Level.TRACE),
         // Ext Root Scan
-        new LogMessageWithLevel("Thread Roots:", Level.DEBUG),
-        new LogMessageWithLevel("StringTable Roots:", Level.DEBUG),
-        new LogMessageWithLevel("Universe Roots:", Level.DEBUG),
-        new LogMessageWithLevel("JNI Handles Roots:", Level.DEBUG),
-        new LogMessageWithLevel("ObjectSynchronizer Roots:", Level.DEBUG),
-        new LogMessageWithLevel("FlatProfiler Roots", Level.DEBUG),
-        new LogMessageWithLevel("Management Roots", Level.DEBUG),
-        new LogMessageWithLevel("SystemDictionary Roots", Level.DEBUG),
-        new LogMessageWithLevel("CLDG Roots", Level.DEBUG),
-        new LogMessageWithLevel("JVMTI Roots", Level.DEBUG),
-        new LogMessageWithLevel("SATB Filtering", Level.DEBUG),
-        new LogMessageWithLevel("CM RefProcessor Roots", Level.DEBUG),
-        new LogMessageWithLevel("Wait For Strong CLD", Level.DEBUG),
-        new LogMessageWithLevel("Weak CLD Roots", Level.DEBUG),
+        new LogMessageWithLevel("Thread Roots", Level.TRACE),
+        new LogMessageWithLevel("StringTable Roots", Level.TRACE),
+        new LogMessageWithLevel("Universe Roots", Level.TRACE),
+        new LogMessageWithLevel("JNI Handles Roots", Level.TRACE),
+        new LogMessageWithLevel("ObjectSynchronizer Roots", Level.TRACE),
+        new LogMessageWithLevel("FlatProfiler Roots", Level.TRACE),
+        new LogMessageWithLevel("Management Roots", Level.TRACE),
+        new LogMessageWithLevel("SystemDictionary Roots", Level.TRACE),
+        new LogMessageWithLevel("CLDG Roots", Level.TRACE),
+        new LogMessageWithLevel("JVMTI Roots", Level.TRACE),
+        new LogMessageWithLevel("SATB Filtering", Level.TRACE),
+        new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
+        new LogMessageWithLevel("Wait For Strong CLD", Level.TRACE),
+        new LogMessageWithLevel("Weak CLD Roots", Level.TRACE),
         // Redirty Cards
         new LogMessageWithLevel("Redirty Cards", Level.DEBUG),
-        new LogMessageWithLevel("Parallel Redirty", Level.DEBUG),
-        new LogMessageWithLevel("Redirtied Cards", Level.DEBUG),
+        new LogMessageWithLevel("Parallel Redirty", Level.TRACE),
+        new LogMessageWithLevel("Redirtied Cards", Level.TRACE),
         // Misc Top-level
-        new LogMessageWithLevel("Code Root Purge", Level.DEBUG),
-        new LogMessageWithLevel("String Dedup Fixup", Level.DEBUG),
-        new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
+        new LogMessageWithLevel("Code Roots Purge", Level.DEBUG),
+        new LogMessageWithLevel("String Dedup Fixup", Level.INFO),
+        new LogMessageWithLevel("Expand Heap After Collection", Level.INFO),
         // Free CSet
-        new LogMessageWithLevel("Young Free CSet", Level.TRACE),
-        new LogMessageWithLevel("Non-Young Free CSet", Level.TRACE),
+        new LogMessageWithLevel("Young Free Collection Set", Level.DEBUG),
+        new LogMessageWithLevel("Non-Young Free Collection Set", Level.DEBUG),
         // Humongous Eager Reclaim
         new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
         new LogMessageWithLevel("Humongous Register", Level.DEBUG),
         // Preserve CM Referents
         new LogMessageWithLevel("Preserve CM Refs", Level.DEBUG),
         // Merge PSS
-        new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG),
+        new LogMessageWithLevel("Merge Per-Thread State", Level.INFO),
     };
     void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
         for (LogMessageWithLevel l : messages) {
-            if (level.lessOrEqualTo(l.level)) {
+            if (level.lessThan(l.level)) {
                 output.shouldNotContain(l.message);
             } else {
-                output.shouldMatch("\\[" + l.level + ".*" + l.message);
+                output.shouldMatch("\\[" + l.level + ".*" + l.message);
             }
         }
     }
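With shouldMatch above, the test now also checks the level at which a message is printed, not just its presence. For a Level.DEBUG entry it would accept a decorated line roughly like the following (an illustrative example assuming the default uptime/level/tags decorators, not output copied from this change):

    [0.456s][debug][gc,phases] GC(3)     Humongous Register: 0.1ms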