8173335: Improve logging for j.l.ref.reference processing
Add logs for each phases of references and stats for each worker threads Reviewed-by: tschatzl, shade
This commit is contained in:
parent
ebb8c32e7f
commit
ebcca449a0
@ -5185,6 +5185,7 @@ void CMSCollector::refProcessingWork() {
|
||||
CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
|
||||
_span, &_markBitMap, &_markStack,
|
||||
&cmsKeepAliveClosure, false /* !preclean */);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
|
||||
{
|
||||
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
|
||||
|
||||
@ -5211,16 +5212,16 @@ void CMSCollector::refProcessingWork() {
|
||||
&cmsKeepAliveClosure,
|
||||
&cmsDrainMarkingStackClosure,
|
||||
&task_executor,
|
||||
_gc_timer_cm);
|
||||
&pt);
|
||||
} else {
|
||||
stats = rp->process_discovered_references(&_is_alive_closure,
|
||||
&cmsKeepAliveClosure,
|
||||
&cmsDrainMarkingStackClosure,
|
||||
NULL,
|
||||
_gc_timer_cm);
|
||||
&pt);
|
||||
}
|
||||
_gc_tracer_cm->report_gc_reference_stats(stats);
|
||||
|
||||
pt.print_all_references();
|
||||
}
|
||||
|
||||
// This is the point where the entire marking should have completed.
|
||||
@ -5261,11 +5262,12 @@ void CMSCollector::refProcessingWork() {
|
||||
if (rp->processing_is_mt()) {
|
||||
rp->balance_all_queues();
|
||||
CMSRefProcTaskExecutor task_executor(*this);
|
||||
rp->enqueue_discovered_references(&task_executor);
|
||||
rp->enqueue_discovered_references(&task_executor, &pt);
|
||||
} else {
|
||||
rp->enqueue_discovered_references(NULL);
|
||||
rp->enqueue_discovered_references(NULL, &pt);
|
||||
}
|
||||
rp->verify_no_references_recorded();
|
||||
pt.print_enqueue_phase();
|
||||
assert(!rp->discovery_enabled(), "should have been disabled");
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -984,20 +984,22 @@ void ParNewGeneration::collect(bool full,
|
||||
// Can the mt_degree be set later (at run_task() time would be best)?
|
||||
rp->set_active_mt_degree(active_workers);
|
||||
ReferenceProcessorStats stats;
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
|
||||
if (rp->processing_is_mt()) {
|
||||
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
|
||||
stats = rp->process_discovered_references(&is_alive, &keep_alive,
|
||||
&evacuate_followers, &task_executor,
|
||||
_gc_timer);
|
||||
&pt);
|
||||
} else {
|
||||
thread_state_set.flush();
|
||||
gch->save_marks();
|
||||
stats = rp->process_discovered_references(&is_alive, &keep_alive,
|
||||
&evacuate_followers, NULL,
|
||||
_gc_timer);
|
||||
&pt);
|
||||
}
|
||||
_gc_tracer.report_gc_reference_stats(stats);
|
||||
_gc_tracer.report_tenuring_threshold(tenuring_threshold());
|
||||
pt.print_all_references();
|
||||
|
||||
if (!promotion_failed()) {
|
||||
// Swap the survivor spaces.
|
||||
@ -1049,14 +1051,16 @@ void ParNewGeneration::collect(bool full,
|
||||
rp->set_enqueuing_is_done(true);
|
||||
if (rp->processing_is_mt()) {
|
||||
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
|
||||
rp->enqueue_discovered_references(&task_executor);
|
||||
rp->enqueue_discovered_references(&task_executor, &pt);
|
||||
} else {
|
||||
rp->enqueue_discovered_references(NULL);
|
||||
rp->enqueue_discovered_references(NULL, &pt);
|
||||
}
|
||||
rp->verify_no_references_recorded();
|
||||
|
||||
gch->trace_heap_after_gc(gc_tracer());
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
|
||||
_gc_timer->register_gc_end();
|
||||
|
||||
_gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
|
||||
|
@ -1260,9 +1260,13 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
|
||||
assert(num_free_regions() == 0, "we should not have added any free regions");
|
||||
rebuild_region_sets(false /* free_list_only */);
|
||||
|
||||
ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
|
||||
|
||||
// Enqueue any discovered reference objects that have
|
||||
// not been removed from the discovered lists.
|
||||
ref_processor_stw()->enqueue_discovered_references();
|
||||
ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
|
||||
#if defined(COMPILER2) || INCLUDE_JVMCI
|
||||
DerivedPointerTable::update_pointers();
|
||||
@ -1667,7 +1671,9 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
|
||||
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
|
||||
CollectedHeap(),
|
||||
_collector_policy(collector_policy),
|
||||
_g1_policy(create_g1_policy()),
|
||||
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
|
||||
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
|
||||
_g1_policy(create_g1_policy(_gc_timer_stw)),
|
||||
_collection_set(this, _g1_policy),
|
||||
_dirty_card_queue_set(false),
|
||||
_is_alive_closure_cm(this),
|
||||
@ -1694,9 +1700,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
|
||||
_expand_heap_after_alloc_failure(true),
|
||||
_old_marking_cycles_started(0),
|
||||
_old_marking_cycles_completed(0),
|
||||
_in_cset_fast_test(),
|
||||
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
|
||||
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
|
||||
_in_cset_fast_test() {
|
||||
|
||||
_workers = new WorkGang("GC Thread", ParallelGCThreads,
|
||||
/* are_GC_task_threads */true,
|
||||
@ -2015,10 +2019,12 @@ void G1CollectedHeap::ref_processing_init() {
|
||||
|
||||
MemRegion mr = reserved_region();
|
||||
|
||||
bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
|
||||
|
||||
// Concurrent Mark ref processor
|
||||
_ref_processor_cm =
|
||||
new ReferenceProcessor(mr, // span
|
||||
ParallelRefProcEnabled && (ParallelGCThreads > 1),
|
||||
mt_processing,
|
||||
// mt processing
|
||||
ParallelGCThreads,
|
||||
// degree of mt processing
|
||||
@ -2035,7 +2041,7 @@ void G1CollectedHeap::ref_processing_init() {
|
||||
// STW ref processor
|
||||
_ref_processor_stw =
|
||||
new ReferenceProcessor(mr, // span
|
||||
ParallelRefProcEnabled && (ParallelGCThreads > 1),
|
||||
mt_processing,
|
||||
// mt processing
|
||||
ParallelGCThreads,
|
||||
// degree of mt processing
|
||||
@ -4313,6 +4319,8 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
|
||||
// Setup the soft refs policy...
|
||||
rp->setup_policy(false);
|
||||
|
||||
ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
|
||||
|
||||
ReferenceProcessorStats stats;
|
||||
if (!rp->processing_is_mt()) {
|
||||
// Serial reference processing...
|
||||
@ -4320,7 +4328,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
|
||||
&keep_alive,
|
||||
&drain_queue,
|
||||
NULL,
|
||||
_gc_timer_stw);
|
||||
pt);
|
||||
} else {
|
||||
uint no_of_gc_workers = workers()->active_workers();
|
||||
|
||||
@ -4334,7 +4342,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
|
||||
&keep_alive,
|
||||
&drain_queue,
|
||||
&par_task_executor,
|
||||
_gc_timer_stw);
|
||||
pt);
|
||||
}
|
||||
|
||||
_gc_tracer_stw->report_gc_reference_stats(stats);
|
||||
@ -4353,11 +4361,13 @@ void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per
|
||||
ReferenceProcessor* rp = _ref_processor_stw;
|
||||
assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
|
||||
|
||||
ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
|
||||
|
||||
// Now enqueue any remaining on the discovered lists on to
|
||||
// the pending list.
|
||||
if (!rp->processing_is_mt()) {
|
||||
// Serial reference processing...
|
||||
rp->enqueue_discovered_references();
|
||||
rp->enqueue_discovered_references(NULL, pt);
|
||||
} else {
|
||||
// Parallel reference enqueueing
|
||||
|
||||
@ -4368,7 +4378,7 @@ void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per
|
||||
n_workers, rp->max_num_q());
|
||||
|
||||
G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
|
||||
rp->enqueue_discovered_references(&par_task_executor);
|
||||
rp->enqueue_discovered_references(&par_task_executor, pt);
|
||||
}
|
||||
|
||||
rp->verify_no_references_recorded();
|
||||
|
@ -298,7 +298,7 @@ private:
|
||||
size_t size,
|
||||
size_t translation_factor);
|
||||
|
||||
static G1Policy* create_g1_policy();
|
||||
static G1Policy* create_g1_policy(STWGCTimer* gc_timer);
|
||||
|
||||
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
|
||||
|
||||
@ -370,6 +370,10 @@ protected:
|
||||
G1EdenRegions _eden;
|
||||
G1SurvivorRegions _survivor;
|
||||
|
||||
STWGCTimer* _gc_timer_stw;
|
||||
|
||||
G1NewTracer* _gc_tracer_stw;
|
||||
|
||||
// The current policy object for the collector.
|
||||
G1Policy* _g1_policy;
|
||||
G1HeapSizingPolicy* _heap_sizing_policy;
|
||||
@ -901,10 +905,6 @@ protected:
|
||||
// The (stw) reference processor...
|
||||
ReferenceProcessor* _ref_processor_stw;
|
||||
|
||||
STWGCTimer* _gc_timer_stw;
|
||||
|
||||
G1NewTracer* _gc_tracer_stw;
|
||||
|
||||
// During reference object discovery, the _is_alive_non_header
|
||||
// closure (if non-null) is applied to the referent object to
|
||||
// determine whether the referent is live. If so then the
|
||||
|
@ -28,6 +28,8 @@
|
||||
#include "gc/g1/g1ParScanThreadState.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
|
||||
class STWGCTimer;
|
||||
|
||||
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
||||
jlong* totals,
|
||||
jbyte* accuracy,
|
||||
@ -40,6 +42,6 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
|
||||
return new HeapRegion(hrs_index, bot(), mr);
|
||||
}
|
||||
|
||||
G1Policy* G1CollectedHeap::create_g1_policy() {
|
||||
return new G1DefaultPolicy();
|
||||
G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
|
||||
return new G1DefaultPolicy(gc_timer);
|
||||
}
|
||||
|
@ -1660,14 +1660,17 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
// Reference lists are balanced (see balance_all_queues() and balance_queues()).
|
||||
rp->set_active_mt_degree(active_workers);
|
||||
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
|
||||
|
||||
// Process the weak references.
|
||||
const ReferenceProcessorStats& stats =
|
||||
rp->process_discovered_references(&g1_is_alive,
|
||||
&g1_keep_alive,
|
||||
&g1_drain_mark_stack,
|
||||
executor,
|
||||
_gc_timer_cm);
|
||||
&pt);
|
||||
_gc_tracer_cm->report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
|
||||
// The do_oop work routines of the keep_alive and drain_marking_stack
|
||||
// oop closures will set the has_overflown flag if we overflow the
|
||||
@ -1678,9 +1681,12 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
|
||||
assert(rp->num_q() == active_workers, "why not");
|
||||
|
||||
rp->enqueue_discovered_references(executor);
|
||||
rp->enqueue_discovered_references(executor, &pt);
|
||||
|
||||
rp->verify_no_references_recorded();
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
|
||||
assert(!rp->discovery_enabled(), "Post condition");
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,7 +47,7 @@
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
|
||||
G1DefaultPolicy::G1DefaultPolicy() :
|
||||
G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
|
||||
_predictor(G1ConfidencePercent / 100.0),
|
||||
_analytics(new G1Analytics(&_predictor)),
|
||||
_mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
|
||||
@ -63,7 +63,7 @@ G1DefaultPolicy::G1DefaultPolicy() :
|
||||
_initial_mark_to_mixed(),
|
||||
_collection_set(NULL),
|
||||
_g1(NULL),
|
||||
_phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
|
||||
_phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
|
||||
_tenuring_threshold(MaxTenuringThreshold),
|
||||
_max_survivor_regions(0),
|
||||
_survivors_age_table(true),
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -44,6 +44,7 @@ class G1Analytics;
|
||||
class G1SurvivorRegions;
|
||||
class G1YoungGenSizer;
|
||||
class GCPolicyCounters;
|
||||
class STWGCTimer;
|
||||
|
||||
class G1DefaultPolicy: public G1Policy {
|
||||
private:
|
||||
@ -264,7 +265,7 @@ private:
|
||||
void abort_time_to_mixed_tracking();
|
||||
public:
|
||||
|
||||
G1DefaultPolicy();
|
||||
G1DefaultPolicy(STWGCTimer* gc_timer);
|
||||
|
||||
virtual ~G1DefaultPolicy();
|
||||
|
||||
|
@ -27,7 +27,7 @@
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/g1StringDedup.hpp"
|
||||
#include "gc/g1/workerDataArray.inline.hpp"
|
||||
#include "gc/shared/workerDataArray.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
@ -37,10 +37,11 @@
|
||||
|
||||
static const char* Indents[5] = {"", " ", " ", " ", " "};
|
||||
|
||||
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
|
||||
G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
_max_gc_threads(max_gc_threads),
|
||||
_gc_start_counter(0),
|
||||
_gc_pause_time_ms(0.0)
|
||||
_gc_pause_time_ms(0.0),
|
||||
_ref_phase_times((GCTimer*)gc_timer, max_gc_threads)
|
||||
{
|
||||
assert(max_gc_threads > 0, "Must have some GC threads");
|
||||
|
||||
@ -156,6 +157,8 @@ void G1GCPhaseTimes::reset() {
|
||||
_gc_par_phases[i]->reset();
|
||||
}
|
||||
}
|
||||
|
||||
_ref_phase_times.reset();
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::note_gc_start() {
|
||||
@ -288,6 +291,19 @@ void G1GCPhaseTimes::debug_time(const char* name, double value) const {
|
||||
log_debug(gc, phases)("%s%s: " TIME_FORMAT, Indents[2], name, value);
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
|
||||
LogTarget(Debug, gc, phases) lt;
|
||||
LogTarget(Debug, gc, phases, ref) lt2;
|
||||
|
||||
if (lt.is_enabled()) {
|
||||
LogStream ls(lt);
|
||||
ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
|
||||
} else if (lt2.is_enabled()) {
|
||||
LogStream ls(lt2);
|
||||
ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
|
||||
}
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::trace_time(const char* name, double value) const {
|
||||
log_trace(gc, phases)("%s%s: " TIME_FORMAT, Indents[3], name, value);
|
||||
}
|
||||
@ -374,7 +390,8 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
|
||||
debug_time("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
|
||||
trace_phase(_gc_par_phases[PreserveCMReferents]);
|
||||
|
||||
debug_time("Reference Processing", _cur_ref_proc_time_ms);
|
||||
debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
|
||||
_ref_phase_times.print_all_references(2, false);
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
debug_time("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
|
||||
@ -390,7 +407,8 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
|
||||
trace_time("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
|
||||
}
|
||||
|
||||
debug_time("Reference Enqueuing", _cur_ref_enq_time_ms);
|
||||
debug_time_for_reference("Reference Enqueuing", _cur_ref_enq_time_ms);
|
||||
_ref_phase_times.print_enqueue_phase(2, false);
|
||||
|
||||
debug_time("Merge Per-Thread State", _recorded_merge_pss_time_ms);
|
||||
debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
|
||||
|
@ -25,11 +25,13 @@
|
||||
#ifndef SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
|
||||
#define SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
|
||||
|
||||
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
|
||||
#include "logging/logLevel.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
class LineBuffer;
|
||||
class STWGCTimer;
|
||||
|
||||
template <class T> class WorkerDataArray;
|
||||
|
||||
@ -159,6 +161,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
double _cur_verify_before_time_ms;
|
||||
double _cur_verify_after_time_ms;
|
||||
|
||||
ReferenceProcessorPhaseTimes _ref_phase_times;
|
||||
|
||||
double worker_time(GCParPhases phase, uint worker);
|
||||
void note_gc_end();
|
||||
void reset();
|
||||
@ -172,6 +176,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
|
||||
void info_time(const char* name, double value) const;
|
||||
void debug_time(const char* name, double value) const;
|
||||
// This will print logs for both 'gc+phases' and 'gc+phases+ref'.
|
||||
void debug_time_for_reference(const char* name, double value) const;
|
||||
void trace_time(const char* name, double value) const;
|
||||
void trace_count(const char* name, size_t value) const;
|
||||
|
||||
@ -181,7 +187,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
void print_other(double accounted_ms) const;
|
||||
|
||||
public:
|
||||
G1GCPhaseTimes(uint max_gc_threads);
|
||||
G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads);
|
||||
void note_gc_start();
|
||||
void print();
|
||||
|
||||
@ -354,6 +360,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
double fast_reclaim_humongous_time_ms() {
|
||||
return _cur_fast_reclaim_humongous_time_ms;
|
||||
}
|
||||
|
||||
ReferenceProcessorPhaseTimes* ref_phase_times() { return &_ref_phase_times; }
|
||||
};
|
||||
|
||||
class G1GCParPhaseTimesTracker : public StackObj {
|
||||
|
@ -149,13 +149,16 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
assert(rp == g1h->ref_processor_stw(), "Sanity");
|
||||
|
||||
rp->setup_policy(clear_all_softrefs);
|
||||
ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q());
|
||||
|
||||
const ReferenceProcessorStats& stats =
|
||||
rp->process_discovered_references(&GenMarkSweep::is_alive,
|
||||
&GenMarkSweep::keep_alive,
|
||||
&GenMarkSweep::follow_stack_closure,
|
||||
NULL,
|
||||
gc_timer());
|
||||
&pt);
|
||||
gc_tracer()->report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
}
|
||||
|
||||
// This is the point where the entire marking should have completed.
|
||||
|
@ -254,7 +254,11 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||
DerivedPointerTable::update_pointers();
|
||||
#endif
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL, &pt);
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
|
||||
// Update time of last GC
|
||||
reset_millis_since_last_gc();
|
||||
@ -528,10 +532,12 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
|
||||
|
||||
ref_processor()->setup_policy(clear_all_softrefs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
const ReferenceProcessorStats& stats =
|
||||
ref_processor()->process_discovered_references(
|
||||
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
|
||||
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
|
||||
gc_tracer()->report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
}
|
||||
|
||||
// This is the point where the entire marking should have completed.
|
||||
|
@ -1041,7 +1041,11 @@ void PSParallelCompact::post_compact()
|
||||
DerivedPointerTable::update_pointers();
|
||||
#endif
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL);
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL, &pt);
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
heap->gen_mangle_unused_area();
|
||||
@ -2103,18 +2107,20 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
|
||||
GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
|
||||
|
||||
ReferenceProcessorStats stats;
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
|
||||
if (ref_processor()->processing_is_mt()) {
|
||||
RefProcTaskExecutor task_executor;
|
||||
stats = ref_processor()->process_discovered_references(
|
||||
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
|
||||
&task_executor, &_gc_timer);
|
||||
&task_executor, &pt);
|
||||
} else {
|
||||
stats = ref_processor()->process_discovered_references(
|
||||
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
|
||||
&_gc_timer);
|
||||
&pt);
|
||||
}
|
||||
|
||||
gc_tracer->report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
}
|
||||
|
||||
// This is the point where the entire marking should have completed.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -415,25 +415,29 @@ bool PSScavenge::invoke_no_policy() {
|
||||
PSKeepAliveClosure keep_alive(promotion_manager);
|
||||
PSEvacuateFollowersClosure evac_followers(promotion_manager);
|
||||
ReferenceProcessorStats stats;
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
|
||||
if (reference_processor()->processing_is_mt()) {
|
||||
PSRefProcTaskExecutor task_executor;
|
||||
stats = reference_processor()->process_discovered_references(
|
||||
&_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
|
||||
&_gc_timer);
|
||||
&pt);
|
||||
} else {
|
||||
stats = reference_processor()->process_discovered_references(
|
||||
&_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
|
||||
&_is_alive_closure, &keep_alive, &evac_followers, NULL, &pt);
|
||||
}
|
||||
|
||||
_gc_tracer.report_gc_reference_stats(stats);
|
||||
pt.print_all_references();
|
||||
|
||||
// Enqueue reference objects discovered during scavenge.
|
||||
if (reference_processor()->processing_is_mt()) {
|
||||
PSRefProcTaskExecutor task_executor;
|
||||
reference_processor()->enqueue_discovered_references(&task_executor);
|
||||
reference_processor()->enqueue_discovered_references(&task_executor, &pt);
|
||||
} else {
|
||||
reference_processor()->enqueue_discovered_references(NULL);
|
||||
reference_processor()->enqueue_discovered_references(NULL, &pt);
|
||||
}
|
||||
|
||||
pt.print_enqueue_phase();
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -661,11 +661,13 @@ void DefNewGeneration::collect(bool full,
|
||||
FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
|
||||
ReferenceProcessor* rp = ref_processor();
|
||||
rp->setup_policy(clear_all_soft_refs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
|
||||
const ReferenceProcessorStats& stats =
|
||||
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
|
||||
NULL, _gc_timer);
|
||||
NULL, &pt);
|
||||
gc_tracer.report_gc_reference_stats(stats);
|
||||
gc_tracer.report_tenuring_threshold(tenuring_threshold());
|
||||
pt.print_all_references();
|
||||
|
||||
if (!_promotion_failed) {
|
||||
// Swap the survivor spaces.
|
||||
|
@ -210,9 +210,11 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
|
||||
|
||||
ref_processor()->setup_policy(clear_all_softrefs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
const ReferenceProcessorStats& stats =
|
||||
ref_processor()->process_discovered_references(
|
||||
&is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
|
||||
&is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
|
||||
pt.print_all_references();
|
||||
gc_tracer()->report_gc_reference_stats(stats);
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -78,8 +78,8 @@ class GCPhase {
|
||||
|
||||
class PhasesStack {
|
||||
public:
|
||||
// Set to 5, since Reference processing needs it.
|
||||
static const int PHASE_LEVELS = 5;
|
||||
// Set to 6, since Reference processing needs it.
|
||||
static const int PHASE_LEVELS = 6;
|
||||
|
||||
private:
|
||||
int _phase_indices[PHASE_LEVELS];
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -428,6 +428,7 @@ class PhaseSender : public PhaseVisitor {
|
||||
case 1: send_phase<EventGCPhasePauseLevel1>(phase); break;
|
||||
case 2: send_phase<EventGCPhasePauseLevel2>(phase); break;
|
||||
case 3: send_phase<EventGCPhasePauseLevel3>(phase); break;
|
||||
case 4: send_phase<EventGCPhasePauseLevel4>(phase); break;
|
||||
default: /* Ignore sending this phase */ break;
|
||||
}
|
||||
}
|
||||
|
@ -387,7 +387,9 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
|
||||
}
|
||||
gen->collect(full, clear_soft_refs, size, is_tlab);
|
||||
if (!rp->enqueuing_is_done()) {
|
||||
rp->enqueue_discovered_references();
|
||||
ReferenceProcessorPhaseTimes pt(NULL, rp->num_q());
|
||||
rp->enqueue_discovered_references(NULL, &pt);
|
||||
pt.print_enqueue_phase();
|
||||
} else {
|
||||
rp->set_enqueuing_is_done(false);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -179,7 +179,7 @@ void ReferenceProcessor::update_soft_ref_master_clock() {
|
||||
// past clock value.
|
||||
}
|
||||
|
||||
size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
|
||||
size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
|
||||
size_t total = 0;
|
||||
for (uint i = 0; i < _max_num_q; ++i) {
|
||||
total += lists[i].length();
|
||||
@ -192,7 +192,9 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
|
||||
OopClosure* keep_alive,
|
||||
VoidClosure* complete_gc,
|
||||
AbstractRefProcTaskExecutor* task_executor,
|
||||
GCTimer* gc_timer) {
|
||||
ReferenceProcessorPhaseTimes* phase_times) {
|
||||
|
||||
double start_time = os::elapsedTime();
|
||||
|
||||
assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
|
||||
// Stop treating discovered references specially.
|
||||
@ -208,40 +210,39 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
|
||||
|
||||
_soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
|
||||
|
||||
ReferenceProcessorStats stats(
|
||||
total_count(_discoveredSoftRefs),
|
||||
ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
|
||||
total_count(_discoveredWeakRefs),
|
||||
total_count(_discoveredFinalRefs),
|
||||
total_count(_discoveredPhantomRefs));
|
||||
|
||||
// Soft references
|
||||
{
|
||||
GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
|
||||
RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this);
|
||||
process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
|
||||
is_alive, keep_alive, complete_gc, task_executor);
|
||||
is_alive, keep_alive, complete_gc, task_executor, phase_times);
|
||||
}
|
||||
|
||||
update_soft_ref_master_clock();
|
||||
|
||||
// Weak references
|
||||
{
|
||||
GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
|
||||
RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this);
|
||||
process_discovered_reflist(_discoveredWeakRefs, NULL, true,
|
||||
is_alive, keep_alive, complete_gc, task_executor);
|
||||
is_alive, keep_alive, complete_gc, task_executor, phase_times);
|
||||
}
|
||||
|
||||
// Final references
|
||||
{
|
||||
GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
|
||||
RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this);
|
||||
process_discovered_reflist(_discoveredFinalRefs, NULL, false,
|
||||
is_alive, keep_alive, complete_gc, task_executor);
|
||||
is_alive, keep_alive, complete_gc, task_executor, phase_times);
|
||||
}
|
||||
|
||||
// Phantom references
|
||||
{
|
||||
GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
|
||||
RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this);
|
||||
process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
|
||||
is_alive, keep_alive, complete_gc, task_executor);
|
||||
is_alive, keep_alive, complete_gc, task_executor, phase_times);
|
||||
}
|
||||
|
||||
// Weak global JNI references. It would make more sense (semantically) to
|
||||
@ -250,15 +251,15 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
|
||||
// thus use JNI weak references to circumvent the phantom references and
|
||||
// resurrect a "post-mortem" object.
|
||||
{
|
||||
GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
|
||||
GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
|
||||
if (task_executor != NULL) {
|
||||
task_executor->set_single_threaded_mode();
|
||||
}
|
||||
process_phaseJNI(is_alive, keep_alive, complete_gc);
|
||||
}
|
||||
|
||||
log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
|
||||
stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
|
||||
phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
|
||||
|
||||
log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
|
||||
|
||||
return stats;
|
||||
@ -289,10 +290,11 @@ void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
|
||||
complete_gc->do_void();
|
||||
}
|
||||
|
||||
void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
|
||||
void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times) {
|
||||
// Enqueue references that are not made active again, and
|
||||
// clear the decks for the next collection (cycle).
|
||||
enqueue_discovered_reflists(task_executor);
|
||||
enqueue_discovered_reflists(task_executor, phase_times);
|
||||
|
||||
// Stop treating discovered references specially.
|
||||
disable_discovery();
|
||||
@ -345,11 +347,14 @@ class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
|
||||
public:
|
||||
RefProcEnqueueTask(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList discovered_refs[],
|
||||
int n_queues)
|
||||
: EnqueueTask(ref_processor, discovered_refs, n_queues)
|
||||
int n_queues,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times)
|
||||
{ }
|
||||
|
||||
virtual void work(unsigned int work_id) {
|
||||
RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id);
|
||||
|
||||
assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
|
||||
// Simplest first cut: static partitioning.
|
||||
int index = work_id;
|
||||
@ -369,10 +374,19 @@ public:
|
||||
};
|
||||
|
||||
// Enqueue references that are not made active again
|
||||
void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
|
||||
void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times) {
|
||||
|
||||
ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
|
||||
total_count(_discoveredWeakRefs),
|
||||
total_count(_discoveredFinalRefs),
|
||||
total_count(_discoveredPhantomRefs));
|
||||
|
||||
RefProcEnqueueTimeTracker tt(phase_times, stats);
|
||||
|
||||
if (_processing_is_mt && task_executor != NULL) {
|
||||
// Parallel code
|
||||
RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
|
||||
RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times);
|
||||
task_executor->execute(tsk);
|
||||
} else {
|
||||
// Serial code: call the parent class's implementation
|
||||
@ -469,7 +483,7 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
|
||||
complete_gc->do_void();
|
||||
log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
|
||||
iter.removed(), iter.processed(), p2i(&refs_list));
|
||||
}
|
||||
}
|
||||
|
||||
// Traverse the list and remove any Refs that are not active, or
|
||||
// whose referents are either alive or NULL.
|
||||
@ -598,19 +612,46 @@ void ReferenceProcessor::abandon_partial_discovery() {
|
||||
}
|
||||
}
|
||||
|
||||
size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
|
||||
DiscoveredList* list = NULL;
|
||||
|
||||
switch (type) {
|
||||
case REF_SOFT:
|
||||
list = _discoveredSoftRefs;
|
||||
break;
|
||||
case REF_WEAK:
|
||||
list = _discoveredWeakRefs;
|
||||
break;
|
||||
case REF_FINAL:
|
||||
list = _discoveredFinalRefs;
|
||||
break;
|
||||
case REF_PHANTOM:
|
||||
list = _discoveredPhantomRefs;
|
||||
break;
|
||||
case REF_OTHER:
|
||||
case REF_NONE:
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
return total_count(list);
|
||||
}
|
||||
|
||||
class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
|
||||
public:
|
||||
RefProcPhase1Task(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList refs_lists[],
|
||||
ReferencePolicy* policy,
|
||||
bool marks_oops_alive)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive),
|
||||
bool marks_oops_alive,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
|
||||
_policy(policy)
|
||||
{ }
|
||||
virtual void work(unsigned int i, BoolObjectClosure& is_alive,
|
||||
OopClosure& keep_alive,
|
||||
VoidClosure& complete_gc)
|
||||
{
|
||||
RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i);
|
||||
|
||||
_ref_processor.process_phase1(_refs_lists[i], _policy,
|
||||
&is_alive, &keep_alive, &complete_gc);
|
||||
}
|
||||
@ -622,13 +663,16 @@ class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
|
||||
public:
|
||||
RefProcPhase2Task(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList refs_lists[],
|
||||
bool marks_oops_alive)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive)
|
||||
bool marks_oops_alive,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times)
|
||||
{ }
|
||||
virtual void work(unsigned int i, BoolObjectClosure& is_alive,
|
||||
OopClosure& keep_alive,
|
||||
VoidClosure& complete_gc)
|
||||
{
|
||||
RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i);
|
||||
|
||||
_ref_processor.process_phase2(_refs_lists[i],
|
||||
&is_alive, &keep_alive, &complete_gc);
|
||||
}
|
||||
@ -639,14 +683,17 @@ public:
|
||||
RefProcPhase3Task(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList refs_lists[],
|
||||
bool clear_referent,
|
||||
bool marks_oops_alive)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive),
|
||||
bool marks_oops_alive,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
|
||||
_clear_referent(clear_referent)
|
||||
{ }
|
||||
virtual void work(unsigned int i, BoolObjectClosure& is_alive,
|
||||
OopClosure& keep_alive,
|
||||
VoidClosure& complete_gc)
|
||||
{
|
||||
RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i);
|
||||
|
||||
_ref_processor.process_phase3(_refs_lists[i], _clear_referent,
|
||||
&is_alive, &keep_alive, &complete_gc);
|
||||
}
|
||||
@ -776,9 +823,13 @@ void ReferenceProcessor::process_discovered_reflist(
|
||||
BoolObjectClosure* is_alive,
|
||||
OopClosure* keep_alive,
|
||||
VoidClosure* complete_gc,
|
||||
AbstractRefProcTaskExecutor* task_executor)
|
||||
AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
{
|
||||
bool mt_processing = task_executor != NULL && _processing_is_mt;
|
||||
|
||||
phase_times->set_processing_is_mt(mt_processing);
|
||||
|
||||
// If discovery used MT and a dynamic number of GC threads, then
|
||||
// the queues must be balanced for correctness if fewer than the
|
||||
// maximum number of queues were used. The number of queue used
|
||||
@ -789,6 +840,7 @@ void ReferenceProcessor::process_discovered_reflist(
|
||||
|
||||
if ((mt_processing && ParallelRefProcBalancingEnabled) ||
|
||||
must_balance) {
|
||||
RefProcBalanceQueuesTimeTracker tt(phase_times);
|
||||
balance_queues(refs_lists);
|
||||
}
|
||||
|
||||
@ -798,8 +850,10 @@ void ReferenceProcessor::process_discovered_reflist(
|
||||
// policy reasons. Keep alive the transitive closure of all
|
||||
// such referents.
|
||||
if (policy != NULL) {
|
||||
RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times);
|
||||
|
||||
if (mt_processing) {
|
||||
RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
|
||||
RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
|
||||
task_executor->execute(phase1);
|
||||
} else {
|
||||
for (uint i = 0; i < _max_num_q; i++) {
|
||||
@ -814,19 +868,26 @@ void ReferenceProcessor::process_discovered_reflist(
|
||||
|
||||
// Phase 2:
|
||||
// . Traverse the list and remove any refs whose referents are alive.
|
||||
{
|
||||
RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times);
|
||||
|
||||
if (mt_processing) {
|
||||
RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
|
||||
RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
|
||||
task_executor->execute(phase2);
|
||||
} else {
|
||||
for (uint i = 0; i < _max_num_q; i++) {
|
||||
process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3:
|
||||
// . Traverse the list and process referents as appropriate.
|
||||
{
|
||||
RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times);
|
||||
|
||||
if (mt_processing) {
|
||||
RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
|
||||
RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times);
|
||||
task_executor->execute(phase3);
|
||||
} else {
|
||||
for (uint i = 0; i < _max_num_q; i++) {
|
||||
@ -834,6 +895,7 @@ void ReferenceProcessor::process_discovered_reflist(
|
||||
is_alive, keep_alive, complete_gc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
|
||||
@ -1196,4 +1258,3 @@ const char* ReferenceProcessor::list_name(uint i) {
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,6 +26,7 @@
|
||||
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
|
||||
|
||||
#include "gc/shared/referencePolicy.hpp"
|
||||
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
|
||||
#include "gc/shared/referenceProcessorStats.hpp"
|
||||
#include "memory/referenceType.hpp"
|
||||
#include "oops/instanceRefKlass.hpp"
|
||||
@ -168,7 +169,7 @@ public:
|
||||
class ReferenceProcessor : public CHeapObj<mtGC> {
|
||||
|
||||
private:
|
||||
size_t total_count(DiscoveredList lists[]);
|
||||
size_t total_count(DiscoveredList lists[]) const;
|
||||
|
||||
protected:
|
||||
// The SoftReference master timestamp clock
|
||||
@ -242,7 +243,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
|
||||
BoolObjectClosure* is_alive,
|
||||
OopClosure* keep_alive,
|
||||
VoidClosure* complete_gc,
|
||||
AbstractRefProcTaskExecutor* task_executor);
|
||||
AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times);
|
||||
|
||||
void process_phaseJNI(BoolObjectClosure* is_alive,
|
||||
OopClosure* keep_alive,
|
||||
@ -310,7 +312,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
|
||||
// occupying the i / _num_q slot.
|
||||
const char* list_name(uint i);
|
||||
|
||||
void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor);
|
||||
void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times);
|
||||
|
||||
protected:
|
||||
// "Preclean" the given discovered reference list
|
||||
@ -420,16 +423,19 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
|
||||
OopClosure* keep_alive,
|
||||
VoidClosure* complete_gc,
|
||||
AbstractRefProcTaskExecutor* task_executor,
|
||||
GCTimer *gc_timer);
|
||||
ReferenceProcessorPhaseTimes* phase_times);
|
||||
|
||||
// Enqueue references at end of GC (called by the garbage collector)
|
||||
void enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
|
||||
void enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor,
|
||||
ReferenceProcessorPhaseTimes* phase_times);
|
||||
|
||||
// If a discovery is in process that is being superceded, abandon it: all
|
||||
// the discovered lists will be empty, and all the objects on them will
|
||||
// have NULL discovered fields. Must be called only at a safepoint.
|
||||
void abandon_partial_discovery();
|
||||
|
||||
size_t total_reference_count(ReferenceType rt) const;
|
||||
|
||||
// debugging
|
||||
void verify_no_references_recorded() PRODUCT_RETURN;
|
||||
void verify_referent(oop obj) PRODUCT_RETURN;
|
||||
@ -586,9 +592,11 @@ class AbstractRefProcTaskExecutor::ProcessTask {
|
||||
protected:
|
||||
ProcessTask(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList refs_lists[],
|
||||
bool marks_oops_alive)
|
||||
bool marks_oops_alive,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: _ref_processor(ref_processor),
|
||||
_refs_lists(refs_lists),
|
||||
_phase_times(phase_times),
|
||||
_marks_oops_alive(marks_oops_alive)
|
||||
{ }
|
||||
|
||||
@ -604,6 +612,7 @@ public:
|
||||
protected:
|
||||
ReferenceProcessor& _ref_processor;
|
||||
DiscoveredList* _refs_lists;
|
||||
ReferenceProcessorPhaseTimes* _phase_times;
|
||||
const bool _marks_oops_alive;
|
||||
};
|
||||
|
||||
@ -612,10 +621,12 @@ class AbstractRefProcTaskExecutor::EnqueueTask {
|
||||
protected:
|
||||
EnqueueTask(ReferenceProcessor& ref_processor,
|
||||
DiscoveredList refs_lists[],
|
||||
int n_queues)
|
||||
int n_queues,
|
||||
ReferenceProcessorPhaseTimes* phase_times)
|
||||
: _ref_processor(ref_processor),
|
||||
_refs_lists(refs_lists),
|
||||
_n_queues(n_queues)
|
||||
_n_queues(n_queues),
|
||||
_phase_times(phase_times)
|
||||
{ }
|
||||
|
||||
public:
|
||||
@ -624,6 +635,7 @@ public:
|
||||
protected:
|
||||
ReferenceProcessor& _ref_processor;
|
||||
DiscoveredList* _refs_lists;
|
||||
ReferenceProcessorPhaseTimes* _phase_times;
|
||||
int _n_queues;
|
||||
};
|
||||
|
||||
|
475
hotspot/src/share/vm/gc/shared/referenceProcessorPhaseTimes.cpp
Normal file
475
hotspot/src/share/vm/gc/shared/referenceProcessorPhaseTimes.cpp
Normal file
@ -0,0 +1,475 @@
|
||||
/*
|
||||
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/gcTimer.hpp"
|
||||
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
|
||||
#include "gc/shared/referenceProcessor.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
|
||||
RefProcWorkerTimeTracker::RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers number,
|
||||
ReferenceProcessorPhaseTimes* phase_times,
|
||||
uint worker_id) :
|
||||
_worker_time(NULL), _start_time(os::elapsedTime()), _worker_id(worker_id) {
|
||||
assert (phase_times != NULL, "Invariant");
|
||||
|
||||
_worker_time = phase_times->worker_time_sec(phase_times->par_phase(number));
|
||||
}
|
||||
|
||||
RefProcWorkerTimeTracker::RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcParPhases phase,
|
||||
ReferenceProcessorPhaseTimes* phase_times,
|
||||
uint worker_id) :
|
||||
_worker_time(NULL), _start_time(os::elapsedTime()), _worker_id(worker_id) {
|
||||
assert (phase_times != NULL, "Invariant");
|
||||
|
||||
_worker_time = phase_times->worker_time_sec(phase);
|
||||
}
|
||||
|
||||
RefProcWorkerTimeTracker::~RefProcWorkerTimeTracker() {
|
||||
_worker_time->set(_worker_id, os::elapsedTime() - _start_time);
|
||||
}
|
||||
|
||||
RefProcPhaseTimeBaseTracker::RefProcPhaseTimeBaseTracker(const char* title,
|
||||
ReferenceProcessorPhaseTimes* phase_times) :
|
||||
_title(title), _phase_times(phase_times), _start_ticks(), _end_ticks() {
|
||||
assert(_phase_times != NULL, "Invariant");
|
||||
|
||||
_start_ticks.stamp();
|
||||
if (_phase_times->gc_timer() != NULL) {
|
||||
_phase_times->gc_timer()->register_gc_phase_start(_title, _start_ticks);
|
||||
}
|
||||
}
|
||||
|
||||
static const char* phase_enum_2_phase_string(ReferenceProcessorPhaseTimes::RefProcParPhases phase) {
|
||||
switch(phase) {
|
||||
case ReferenceProcessorPhaseTimes::SoftRefPhase1:
|
||||
return "Phase1";
|
||||
case ReferenceProcessorPhaseTimes::SoftRefPhase2:
|
||||
case ReferenceProcessorPhaseTimes::WeakRefPhase2:
|
||||
case ReferenceProcessorPhaseTimes::FinalRefPhase2:
|
||||
case ReferenceProcessorPhaseTimes::PhantomRefPhase2:
|
||||
return "Phase2";
|
||||
case ReferenceProcessorPhaseTimes::SoftRefPhase3:
|
||||
case ReferenceProcessorPhaseTimes::WeakRefPhase3:
|
||||
case ReferenceProcessorPhaseTimes::FinalRefPhase3:
|
||||
case ReferenceProcessorPhaseTimes::PhantomRefPhase3:
|
||||
return "Phase3";
|
||||
case ReferenceProcessorPhaseTimes::RefEnqueue:
|
||||
return "Reference Enqueuing";
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static const char* Indents[6] = {"", " ", " ", " ", " ", " "};
|
||||
|
||||
Ticks RefProcPhaseTimeBaseTracker::end_ticks() {
|
||||
// If ASSERT is defined, the default value of Ticks will be -2.
|
||||
if (_end_ticks.value() <= 0) {
|
||||
_end_ticks.stamp();
|
||||
}
|
||||
|
||||
return _end_ticks;
|
||||
}
|
||||
|
||||
double RefProcPhaseTimeBaseTracker::elapsed_time() {
|
||||
jlong end_value = end_ticks().value();
|
||||
|
||||
return TimeHelper::counter_to_millis(end_value - _start_ticks.value());
|
||||
}
|
||||
|
||||
RefProcPhaseTimeBaseTracker::~RefProcPhaseTimeBaseTracker() {
|
||||
if (_phase_times->gc_timer() != NULL) {
|
||||
Ticks ticks = end_ticks();
|
||||
_phase_times->gc_timer()->register_gc_phase_end(ticks);
|
||||
}
|
||||
}
|
||||
|
||||
RefProcBalanceQueuesTimeTracker::RefProcBalanceQueuesTimeTracker(ReferenceProcessorPhaseTimes* phase_times) :
|
||||
RefProcPhaseTimeBaseTracker("Balance queues", phase_times) {}
|
||||
|
||||
RefProcBalanceQueuesTimeTracker::~RefProcBalanceQueuesTimeTracker() {
|
||||
double elapsed = elapsed_time();
|
||||
phase_times()->set_balance_queues_time_ms(phase_times()->processing_ref_type(), elapsed);
|
||||
}
|
||||
|
||||
#define ASSERT_REF_TYPE(ref_type) assert(ref_type >= REF_SOFT && ref_type <= REF_PHANTOM, \
|
||||
"Invariant (%d)", (int)ref_type)
|
||||
|
||||
#define ASSERT_PHASE_NUMBER(phase_number) assert(phase_number >= ReferenceProcessorPhaseTimes::RefPhase1 && \
|
||||
phase_number <= ReferenceProcessorPhaseTimes::RefPhaseMax, \
|
||||
"Invariant (%d)", phase_number);
|
||||
|
||||
static const char* phase_number_2_string(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers phase_number) {
|
||||
ASSERT_PHASE_NUMBER(phase_number);
|
||||
|
||||
switch(phase_number) {
|
||||
case ReferenceProcessorPhaseTimes::RefPhase1:
|
||||
return "Phase1";
|
||||
case ReferenceProcessorPhaseTimes::RefPhase2:
|
||||
return "Phase2";
|
||||
case ReferenceProcessorPhaseTimes::RefPhase3:
|
||||
return "Phase3";
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
RefProcParPhaseTimeTracker::RefProcParPhaseTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers phase_number,
|
||||
ReferenceProcessorPhaseTimes* phase_times) :
|
||||
_phase_number(phase_number),
|
||||
RefProcPhaseTimeBaseTracker(phase_number_2_string(phase_number), phase_times) {}
|
||||
|
||||
RefProcParPhaseTimeTracker::~RefProcParPhaseTimeTracker() {
|
||||
double elapsed = elapsed_time();
|
||||
ReferenceProcessorPhaseTimes::RefProcParPhases phase = phase_times()->par_phase(_phase_number);
|
||||
phase_times()->set_par_phase_time_ms(phase, elapsed);
|
||||
}
|
||||
|
||||
static const char* ref_type_2_string(ReferenceType ref_type) {
|
||||
ASSERT_REF_TYPE(ref_type);
|
||||
|
||||
switch(ref_type) {
|
||||
case REF_SOFT:
|
||||
return "SoftReference";
|
||||
case REF_WEAK:
|
||||
return "WeakReference";
|
||||
case REF_FINAL:
|
||||
return "FinalReference";
|
||||
case REF_PHANTOM:
|
||||
return "PhantomReference";
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
RefProcPhaseTimesTracker::RefProcPhaseTimesTracker(ReferenceType ref_type,
|
||||
ReferenceProcessorPhaseTimes* phase_times,
|
||||
ReferenceProcessor* rp) :
|
||||
_rp(rp), RefProcPhaseTimeBaseTracker(ref_type_2_string(ref_type), phase_times) {
|
||||
phase_times->set_processing_ref_type(ref_type);
|
||||
|
||||
size_t discovered = rp->total_reference_count(ref_type);
|
||||
phase_times->set_ref_discovered(ref_type, discovered);
|
||||
}
|
||||
|
||||
RefProcPhaseTimesTracker::~RefProcPhaseTimesTracker() {
|
||||
double elapsed = elapsed_time();
|
||||
ReferenceProcessorPhaseTimes* times = phase_times();
|
||||
ReferenceType ref_type = times->processing_ref_type();
|
||||
times->set_ref_proc_time_ms(ref_type, elapsed);
|
||||
|
||||
size_t after_count = _rp->total_reference_count(ref_type);
|
||||
size_t discovered = times->ref_discovered(ref_type);
|
||||
times->set_ref_cleared(ref_type, discovered - after_count);
|
||||
}

RefProcEnqueueTimeTracker::RefProcEnqueueTimeTracker(ReferenceProcessorPhaseTimes* phase_times,
                                                     ReferenceProcessorStats& stats) :
  RefProcPhaseTimeBaseTracker("Reference Enqueuing", phase_times) {
  phase_times->set_ref_enqueued(REF_SOFT, stats.soft_count());
  phase_times->set_ref_enqueued(REF_WEAK, stats.weak_count());
  phase_times->set_ref_enqueued(REF_FINAL, stats.final_count());
  phase_times->set_ref_enqueued(REF_PHANTOM, stats.phantom_count());
}

RefProcEnqueueTimeTracker::~RefProcEnqueueTimeTracker() {
  double elapsed = elapsed_time();

  phase_times()->set_par_phase_time_ms(ReferenceProcessorPhaseTimes::RefEnqueue, elapsed);
}

ReferenceProcessorPhaseTimes::ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads) :
  _gc_timer(gc_timer), _processing_is_mt(false) {

  for (int i = 0; i < RefParPhaseMax; i++) {
    _worker_time_sec[i] = new WorkerDataArray<double>(max_gc_threads, "Process lists (ms)");
    _par_phase_time_ms[i] = uninitialized();
  }

  for (int i = 0; i < number_of_subclasses_of_ref; i++) {
    _ref_proc_time_ms[i] = uninitialized();
    _balance_queues_time_ms[i] = uninitialized();
    _ref_cleared[i] = 0;
    _ref_discovered[i] = 0;
    _ref_enqueued[i] = 0;
  }
}

inline int ref_type_2_index(ReferenceType ref_type) {
  return ref_type - REF_SOFT;
}

#define ASSERT_PAR_PHASE(phase) assert(phase >= ReferenceProcessorPhaseTimes::SoftRefPhase1 && \
                                       phase < ReferenceProcessorPhaseTimes::RefParPhaseMax,  \
                                       "Invariant (%d)", (int)phase);

WorkerDataArray<double>* ReferenceProcessorPhaseTimes::worker_time_sec(RefProcParPhases par_phase) const {
  ASSERT_PAR_PHASE(par_phase);
  return _worker_time_sec[par_phase];
}

double ReferenceProcessorPhaseTimes::par_phase_time_ms(RefProcParPhases par_phase) const {
  ASSERT_PAR_PHASE(par_phase);
  return _par_phase_time_ms[par_phase];
}

void ReferenceProcessorPhaseTimes::set_par_phase_time_ms(RefProcParPhases par_phase,
                                                         double par_phase_time_ms) {
  ASSERT_PAR_PHASE(par_phase);
  _par_phase_time_ms[par_phase] = par_phase_time_ms;
}

void ReferenceProcessorPhaseTimes::reset() {
  for (int i = 0; i < RefParPhaseMax; i++) {
    _worker_time_sec[i]->reset();
    _par_phase_time_ms[i] = uninitialized();
  }

  for (int i = 0; i < number_of_subclasses_of_ref; i++) {
    _ref_proc_time_ms[i] = uninitialized();
    _balance_queues_time_ms[i] = uninitialized();
    _ref_cleared[i] = 0;
    _ref_discovered[i] = 0;
    _ref_enqueued[i] = 0;
  }

  _total_time_ms = uninitialized();

  _processing_is_mt = false;
}

ReferenceProcessorPhaseTimes::~ReferenceProcessorPhaseTimes() {
  for (int i = 0; i < RefParPhaseMax; i++) {
    delete _worker_time_sec[i];
  }
}

double ReferenceProcessorPhaseTimes::ref_proc_time_ms(ReferenceType ref_type) const {
  ASSERT_REF_TYPE(ref_type);
  // _ref_proc_time_ms is indexed by reference type; see set_ref_proc_time_ms() below.
  return _ref_proc_time_ms[ref_type_2_index(ref_type)];
}

void ReferenceProcessorPhaseTimes::set_ref_proc_time_ms(ReferenceType ref_type,
                                                        double ref_proc_time_ms) {
  ASSERT_REF_TYPE(ref_type);
  _ref_proc_time_ms[ref_type_2_index(ref_type)] = ref_proc_time_ms;
}

size_t ReferenceProcessorPhaseTimes::ref_cleared(ReferenceType ref_type) const {
  ASSERT_REF_TYPE(ref_type);
  return _ref_cleared[ref_type_2_index(ref_type)];
}

void ReferenceProcessorPhaseTimes::set_ref_cleared(ReferenceType ref_type, size_t count) {
  ASSERT_REF_TYPE(ref_type);
  _ref_cleared[ref_type_2_index(ref_type)] = count;
}

size_t ReferenceProcessorPhaseTimes::ref_discovered(ReferenceType ref_type) const {
  ASSERT_REF_TYPE(ref_type);
  return _ref_discovered[ref_type_2_index(ref_type)];
}

void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) {
  ASSERT_REF_TYPE(ref_type);
  _ref_discovered[ref_type_2_index(ref_type)] = count;
}

size_t ReferenceProcessorPhaseTimes::ref_enqueued(ReferenceType ref_type) const {
  ASSERT_REF_TYPE(ref_type);
  return _ref_enqueued[ref_type_2_index(ref_type)];
}

void ReferenceProcessorPhaseTimes::set_ref_enqueued(ReferenceType ref_type, size_t count) {
  ASSERT_REF_TYPE(ref_type);
  _ref_enqueued[ref_type_2_index(ref_type)] = count;
}

double ReferenceProcessorPhaseTimes::balance_queues_time_ms(ReferenceType ref_type) const {
  ASSERT_REF_TYPE(ref_type);
  return _balance_queues_time_ms[ref_type_2_index(ref_type)];
}

void ReferenceProcessorPhaseTimes::set_balance_queues_time_ms(ReferenceType ref_type, double time_ms) {
  ASSERT_REF_TYPE(ref_type);
  _balance_queues_time_ms[ref_type_2_index(ref_type)] = time_ms;
}

ReferenceProcessorPhaseTimes::RefProcParPhases
ReferenceProcessorPhaseTimes::par_phase(RefProcPhaseNumbers phase_number) const {
  ASSERT_PHASE_NUMBER(phase_number);
  ASSERT_REF_TYPE(_processing_ref_type);

  int result = SoftRefPhase1;

  switch(_processing_ref_type) {
    case REF_SOFT:
      result = (int)SoftRefPhase1;
      result += phase_number;

      assert((RefProcParPhases)result >= SoftRefPhase1 &&
             (RefProcParPhases)result <= SoftRefPhase3,
             "Invariant (%d)", result);
      break;
    case REF_WEAK:
      result = (int)WeakRefPhase2;
      result += (phase_number - 1);
      assert((RefProcParPhases)result >= WeakRefPhase2 &&
             (RefProcParPhases)result <= WeakRefPhase3,
             "Invariant (%d)", result);
      break;
    case REF_FINAL:
      result = (int)FinalRefPhase2;
      result += (phase_number - 1);
      assert((RefProcParPhases)result >= FinalRefPhase2 &&
             (RefProcParPhases)result <= FinalRefPhase3,
             "Invariant (%d)", result);
      break;
    case REF_PHANTOM:
      result = (int)PhantomRefPhase2;
      result += (phase_number - 1);
      assert((RefProcParPhases)result >= PhantomRefPhase2 &&
             (RefProcParPhases)result <= PhantomRefPhase3,
             "Invariant (%d)", result);
      break;
    default:
      ShouldNotReachHere();
  }

  ASSERT_PAR_PHASE(result);

  return (RefProcParPhases)result;
}
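Since only SoftReference has a Phase1, the parallel phases are packed into a single flat enum and par_phase() maps the pair (currently-processed reference type, phase number) onto it. The following standalone sketch repeats that mapping with local copies of the enums so it compiles on its own; it is an illustration, not the HotSpot source:

#include <cassert>

// Local copies of the enums, in the same order as the HotSpot header.
enum RefProcParPhases {
  SoftRefPhase1, SoftRefPhase2, SoftRefPhase3,
  WeakRefPhase2, WeakRefPhase3,
  FinalRefPhase2, FinalRefPhase3,
  PhantomRefPhase2, PhantomRefPhase3,
  RefEnqueue, RefParPhaseMax
};
enum RefProcPhaseNumbers { RefPhase1, RefPhase2, RefPhase3, RefPhaseMax };
enum RefType { Soft, Weak, Final, Phantom };

// Same arithmetic as par_phase(): soft references start at their Phase1 slot,
// every other type starts at its Phase2 slot.
RefProcParPhases par_phase(RefType type, RefProcPhaseNumbers number) {
  switch (type) {
    case Soft:    return RefProcParPhases(SoftRefPhase1    + number);
    case Weak:    return RefProcParPhases(WeakRefPhase2    + (number - 1));
    case Final:   return RefProcParPhases(FinalRefPhase2   + (number - 1));
    case Phantom: return RefProcParPhases(PhantomRefPhase2 + (number - 1));
  }
  return RefParPhaseMax; // unreachable for valid input
}

int main() {
  assert(par_phase(Soft, RefPhase1)    == SoftRefPhase1);
  assert(par_phase(Soft, RefPhase3)    == SoftRefPhase3);
  assert(par_phase(Weak, RefPhase2)    == WeakRefPhase2);
  assert(par_phase(Phantom, RefPhase3) == PhantomRefPhase3);
  return 0;
}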

void ReferenceProcessorPhaseTimes::print_enqueue_phase(uint base_indent, bool print_total) const {
  if (print_total) {
    print_phase(RefEnqueue, base_indent);
  }

  log_debug(gc, phases, ref)("%sReference Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT
                             " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
                             Indents[base_indent + 1], ref_enqueued(REF_SOFT), ref_enqueued(REF_WEAK),
                             ref_enqueued(REF_FINAL), ref_enqueued(REF_PHANTOM));
}

#define TIME_FORMAT "%.1lfms"

void ReferenceProcessorPhaseTimes::print_all_references(uint base_indent, bool print_total) const {
  if (print_total) {
    LogTarget(Debug, gc, phases, ref) lt;

    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("%s%s: " TIME_FORMAT,
                  Indents[base_indent], "Reference Processing", total_time_ms());
    }
  }

  uint next_indent = base_indent + 1;
  print_reference(REF_SOFT, next_indent);
  print_reference(REF_WEAK, next_indent);
  print_reference(REF_FINAL, next_indent);
  print_reference(REF_PHANTOM, next_indent);
}

void ReferenceProcessorPhaseTimes::print_reference(ReferenceType ref_type, uint base_indent) const {
  LogTarget(Debug, gc, phases, ref) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    uint next_indent = base_indent + 1;
    ResourceMark rm;

    ls.print_cr("%s%s: " TIME_FORMAT,
                Indents[base_indent], ref_type_2_string(ref_type), ref_proc_time_ms(ref_type));

    double balance_time = balance_queues_time_ms(ref_type);
    if (balance_time != uninitialized()) {
      ls.print_cr("%s%s " TIME_FORMAT, Indents[next_indent], "Balance queues:", balance_time);
    }

    switch(ref_type) {
      case REF_SOFT:
        print_phase(SoftRefPhase1, next_indent);
        print_phase(SoftRefPhase2, next_indent);
        print_phase(SoftRefPhase3, next_indent);
        break;

      case REF_WEAK:
        print_phase(WeakRefPhase2, next_indent);
        print_phase(WeakRefPhase3, next_indent);
        break;

      case REF_FINAL:
        print_phase(FinalRefPhase2, next_indent);
        print_phase(FinalRefPhase3, next_indent);
        break;

      case REF_PHANTOM:
        print_phase(PhantomRefPhase2, next_indent);
        print_phase(PhantomRefPhase3, next_indent);
        break;

      default:
        ShouldNotReachHere();
    }

    ls.print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Discovered:", ref_discovered(ref_type));
    ls.print_cr("%s%s " SIZE_FORMAT, Indents[next_indent], "Cleared:", ref_cleared(ref_type));
  }
}

void ReferenceProcessorPhaseTimes::print_phase(RefProcParPhases phase, uint indent) const {
  double phase_time = par_phase_time_ms(phase);
  if (phase_time != uninitialized()) {
    LogTarget(Debug, gc, phases, ref) lt;

    LogStream ls(lt);

    ls.print_cr("%s%s%s " TIME_FORMAT,
                Indents[indent],
                phase_enum_2_phase_string(phase),
                indent == 0 ? "" : ":", /* 0 indent logs don't need colon. */
                phase_time);

    LogTarget(Trace, gc, phases, ref) lt2;
    if (_processing_is_mt && lt2.is_enabled()) {
      LogStream ls(lt2);

      ls.print("%s", Indents[indent + 1]);
      // worker_time_sec is recorded in seconds but it will be printed in milliseconds.
      worker_time_sec(phase)->print_summary_on(&ls, true);
    }
  }
}

#undef ASSERT_REF_TYPE
#undef ASSERT_PHASE_NUMBER
#undef ASSERT_PAR_PHASE
#undef TIME_FORMAT

hotspot/src/share/vm/gc/shared/referenceProcessorPhaseTimes.hpp (new file, 215 lines)
@ -0,0 +1,215 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSORPHASETIMES_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSORPHASETIMES_HPP

#include "gc/shared/referenceProcessorStats.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/referenceType.hpp"
#include "utilities/ticks.hpp"

class DiscoveredList;
class GCTimer;

class ReferenceProcessorPhaseTimes : public CHeapObj<mtGC> {
public:
  // Detailed phases that have parallel work.
  enum RefProcParPhases {
    SoftRefPhase1,
    SoftRefPhase2,
    SoftRefPhase3,
    WeakRefPhase2,
    WeakRefPhase3,
    FinalRefPhase2,
    FinalRefPhase3,
    PhantomRefPhase2,
    PhantomRefPhase3,
    RefEnqueue,
    RefParPhaseMax
  };

  // Sub-phases that are used when processing each j.l.Reference type.
  // Only SoftReference has RefPhase1.
  enum RefProcPhaseNumbers {
    RefPhase1,
    RefPhase2,
    RefPhase3,
    RefPhaseMax
  };

private:
  static const int number_of_subclasses_of_ref = REF_PHANTOM - REF_OTHER; // 5 - 1 = 4

  // Records per-thread information for each phase.
  WorkerDataArray<double>* _worker_time_sec[RefParPhaseMax];
  // Records the elapsed time of each phase.
  double _par_phase_time_ms[RefParPhaseMax];

  // Total time spent processing each reference type.
  // e.g. _ref_proc_time_ms[0] = _par_phase_time_ms[SoftRefPhase1] +
  //                             _par_phase_time_ms[SoftRefPhase2] +
  //                             _par_phase_time_ms[SoftRefPhase3] + extra time.
  double _ref_proc_time_ms[number_of_subclasses_of_ref];

  double _total_time_ms;

  size_t _ref_cleared[number_of_subclasses_of_ref];
  size_t _ref_discovered[number_of_subclasses_of_ref];
  size_t _ref_enqueued[number_of_subclasses_of_ref];
  double _balance_queues_time_ms[number_of_subclasses_of_ref];

  bool _processing_is_mt;

  // The reference type currently being processed.
  ReferenceType _processing_ref_type;

  GCTimer* _gc_timer;

  double par_phase_time_ms(RefProcParPhases phase) const;
  double ref_proc_time_ms(ReferenceType ref_type) const;

  double total_time_ms() const { return _total_time_ms; }

  size_t ref_cleared(ReferenceType ref_type) const;
  size_t ref_enqueued(ReferenceType ref_type) const;

  double balance_queues_time_ms(ReferenceType ref_type) const;

  void print_reference(ReferenceType ref_type, uint base_indent) const;
  void print_phase(RefProcParPhases phase, uint indent) const;

public:
  ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads);
  ~ReferenceProcessorPhaseTimes();

  static double uninitialized() { return -1.0; }

  WorkerDataArray<double>* worker_time_sec(RefProcParPhases phase) const;
  void set_par_phase_time_ms(RefProcParPhases phase, double par_phase_time_ms);

  void set_ref_proc_time_ms(ReferenceType ref_type, double ref_proc_time_ms);

  void set_total_time_ms(double total_time_ms) { _total_time_ms = total_time_ms; }

  void set_ref_cleared(ReferenceType ref_type, size_t count);
  size_t ref_discovered(ReferenceType ref_type) const;
  void set_ref_discovered(ReferenceType ref_type, size_t count);
  void set_ref_enqueued(ReferenceType ref_type, size_t count);

  void set_balance_queues_time_ms(ReferenceType ref_type, double time_ms);

  void set_processing_is_mt(bool processing_is_mt) { _processing_is_mt = processing_is_mt; }

  ReferenceType processing_ref_type() const { return _processing_ref_type; }
  void set_processing_ref_type(ReferenceType processing_ref_type) { _processing_ref_type = processing_ref_type; }

  // Returns the RefProcParPhases calculated from phase_number and _processing_ref_type.
  RefProcParPhases par_phase(RefProcPhaseNumbers phase_number) const;

  GCTimer* gc_timer() const { return _gc_timer; }

  // Reset all fields. If they are not reset before the next cycle, an assertion will fail.
  void reset();

  void print_enqueue_phase(uint base_indent = 0, bool print_total = true) const;
  void print_all_references(uint base_indent = 0, bool print_total = true) const;
};

// Updates the working time of each worker thread.
class RefProcWorkerTimeTracker : public StackObj {
protected:
  WorkerDataArray<double>* _worker_time;
  double                   _start_time;
  uint                     _worker_id;

public:
  RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers number,
                           ReferenceProcessorPhaseTimes* phase_times,
                           uint worker_id);
  RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcParPhases phase,
                           ReferenceProcessorPhaseTimes* phase_times,
                           uint worker_id);
  ~RefProcWorkerTimeTracker();
};

class RefProcPhaseTimeBaseTracker : public StackObj {
protected:
  const char*                   _title;
  ReferenceProcessorPhaseTimes* _phase_times;
  Ticks                         _start_ticks;
  Ticks                         _end_ticks;

  Ticks end_ticks();
  double elapsed_time();
  ReferenceProcessorPhaseTimes* phase_times() const { return _phase_times; }
  // Prints the phase's elapsed time, with per-worker information if processed multi-threaded.
  void print_phase(ReferenceProcessorPhaseTimes::RefProcParPhases phase, uint indent);

public:
  RefProcPhaseTimeBaseTracker(const char* title,
                              ReferenceProcessorPhaseTimes* phase_times);
  ~RefProcPhaseTimeBaseTracker();
};

// Updates the queue-balancing time in ReferenceProcessorPhaseTimes and
// saves it into the GCTimer.
class RefProcBalanceQueuesTimeTracker : public RefProcPhaseTimeBaseTracker {
public:
  RefProcBalanceQueuesTimeTracker(ReferenceProcessorPhaseTimes* phase_times);
  ~RefProcBalanceQueuesTimeTracker();
};

// Updates the phase time in ReferenceProcessorPhaseTimes and saves it into the GCTimer.
class RefProcParPhaseTimeTracker : public RefProcPhaseTimeBaseTracker {
  ReferenceProcessorPhaseTimes::RefProcPhaseNumbers _phase_number;

public:
  RefProcParPhaseTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers phase_number,
                             ReferenceProcessorPhaseTimes* phase_times);
  ~RefProcParPhaseTimeTracker();
};

// Updates phase-time-related information:
// - each phase's processing time, cleared/discovered reference counts, and per-worker-thread
//   stats if processed multi-threaded.
class RefProcPhaseTimesTracker : public RefProcPhaseTimeBaseTracker {
  ReferenceProcessor* _rp;

public:
  RefProcPhaseTimesTracker(ReferenceType ref_type,
                           ReferenceProcessorPhaseTimes* phase_times,
                           ReferenceProcessor* rp);
  ~RefProcPhaseTimesTracker();
};

// Updates enqueue-time-related information:
// - enqueueing time, enqueued reference counts, and per-worker-thread stats if processed
//   multi-threaded.
class RefProcEnqueueTimeTracker : public RefProcPhaseTimeBaseTracker {
public:
  RefProcEnqueueTimeTracker(ReferenceProcessorPhaseTimes* phase_times,
                            ReferenceProcessorStats& stats);
  ~RefProcEnqueueTimeTracker();
};
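All of the trackers declared above follow the same scoped (RAII) pattern: the constructor records a start timestamp and the destructor stores the elapsed time into the shared ReferenceProcessorPhaseTimes object when the phase's scope is left. A minimal standalone sketch of that pattern, with hypothetical names and simplified to std::chrono timing:

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for ReferenceProcessorPhaseTimes.
struct PhaseTimes {
  double balance_queues_ms = -1.0; // -1.0 plays the role of uninitialized()
};

// Scoped timer in the spirit of RefProcPhaseTimeBaseTracker: timing starts at
// construction and is recorded when the object goes out of scope.
class ScopedPhaseTimer {
  PhaseTimes* _times;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(PhaseTimes* times)
    : _times(times), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - _start;
    _times->balance_queues_ms = elapsed.count();
  }
};

int main() {
  PhaseTimes times;
  {
    ScopedPhaseTimer t(&times); // corresponds to entering a phase
    // ... work that would balance the discovered-reference queues ...
  }                             // destructor records the elapsed time
  std::printf("Balance queues: %.1lfms\n", times.balance_queues_ms);
  return 0;
}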

#endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSORPHASETIMES_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
 */

#include "precompiled.hpp"
#include "gc/g1/workerDataArray.inline.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "utilities/ostream.hpp"

template <>
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,8 @@
 *
 */

#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
#define SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
#ifndef SHARE_VM_GC_SHARED_WORKERDATAARRAY_HPP
#define SHARE_VM_GC_SHARED_WORKERDATAARRAY_HPP

#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
@ -88,4 +88,4 @@ private:
  void print_details_on(outputStream* out) const;
};

#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
#endif // SHARE_VM_GC_SHARED_WORKERDATAARRAY_HPP
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,10 +22,10 @@
 *
 */

#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
#define SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
#ifndef SHARE_VM_GC_SHARED_WORKERDATAARRAY_INLINE_HPP
#define SHARE_VM_GC_SHARED_WORKERDATAARRAY_INLINE_HPP

#include "gc/g1/workerDataArray.hpp"
#include "gc/shared/workerDataArray.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/ostream.hpp"

@ -166,4 +166,4 @@ void WorkerDataArray<T>::reset() {
  }
}

#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
#endif // SHARE_VM_GC_SHARED_WORKERDATAARRAY_INLINE_HPP
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,7 @@ DEBUG_ONLY(size_t Test_log_prefix_prefixer(char* buf, size_t len);)
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, mmu)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, ref)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, task)) \
  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
 Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
 Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

 This code is free software; you can redistribute it and/or modify it
@ -463,6 +463,11 @@ Declares a structure type that can be used in other events.
      <value type="STRING" field="name" label="Name" />
    </event>

    <event id="GCPhasePauseLevel4" path="vm/gc/phases/pause_level_4" label="GC Phase Pause Level 4" has_thread="true">
      <value type="UINT" field="gcId" label="GC Identifier" relation="GcId"/>
      <value type="STRING" field="name" label="Name" />
    </event>

    <event id="GCPhaseConcurrent" path="vm/gc/phases/concurrent" label="GC Phase Concurrent" has_thread="true">
      <value type="UINT" field="gcId" label="GC Identifier" relation="GcId"/>
      <value type="STRING" field="name" label="Name" />

@ -142,6 +142,9 @@ public class TestGCLogMessages {
        // TLAB handling
        new LogMessageWithLevel("Prepare TLABs", Level.DEBUG),
        new LogMessageWithLevel("Resize TLABs", Level.DEBUG),
        // Reference Processing
        new LogMessageWithLevel("Reference Processing", Level.DEBUG),
        new LogMessageWithLevel("Reference Enqueuing", Level.DEBUG),

        new LogMessageWithLevelC2OrJVMCIOnly("DerivedPointerTable Update", Level.DEBUG),
        new LogMessageWithLevel("Start New Collection Set", Level.DEBUG),

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,31 +31,73 @@
 *          java.management
 */

import java.lang.ref.SoftReference;
import java.util.ArrayList;

import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class TestPrintReferences {
  public static void main(String[] args) throws Exception {
    ProcessBuilder pb_enabled =
      ProcessTools.createJavaProcessBuilder("-Xlog:gc+ref=debug", "-Xmx10M", GCTest.class.getName());
    ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder("-Xlog:gc+phases+ref=debug",
                                                                      "-XX:+UseG1GC",
                                                                      "-Xmx10M",
                                                                      GCTest.class.getName());
    OutputAnalyzer output = new OutputAnalyzer(pb_enabled.start());

    String countRegex = "[0-9]+ refs";
    String indent_4 = "    ";
    String indent_6 = "      ";
    String indent_8 = "        ";
    String gcLogTimeRegex = ".* GC\\([0-9]+\\) ";
    String countRegex = "[0-9]+";
    String timeRegex = "[0-9]+[.,][0-9]+ms";
    String totalRegex = gcLogTimeRegex + indent_4 + "Reference Processing: " + timeRegex + "\n";
    String balanceRegex = gcLogTimeRegex + indent_8 + "Balance queues: " + timeRegex + "\n";
    String softRefRegex = gcLogTimeRegex + indent_6 + "SoftReference: " + timeRegex + "\n";
    String weakRefRegex = gcLogTimeRegex + indent_6 + "WeakReference: " + timeRegex + "\n";
    String finalRefRegex = gcLogTimeRegex + indent_6 + "FinalReference: " + timeRegex + "\n";
    String phantomRefRegex = gcLogTimeRegex + indent_6 + "PhantomReference: " + timeRegex + "\n";
    String refDetailRegex = gcLogTimeRegex + indent_8 + "Phase2: " + timeRegex + "\n" +
                            gcLogTimeRegex + indent_8 + "Phase3: " + timeRegex + "\n" +
                            gcLogTimeRegex + indent_8 + "Discovered: " + countRegex + "\n" +
                            gcLogTimeRegex + indent_8 + "Cleared: " + countRegex + "\n";
    String softRefDetailRegex = gcLogTimeRegex + indent_8 + "Phase1: " + timeRegex + "\n" + refDetailRegex;
    String enqueueRegex = gcLogTimeRegex + indent_4 + "Reference Enqueuing: " + timeRegex + "\n";
    String enqueueDetailRegex = gcLogTimeRegex + indent_6 + "Reference Counts: Soft: " + countRegex +
                                " Weak: " + countRegex + " Final: " + countRegex + " Phantom: " + countRegex + "\n";

    output.shouldMatch(".* GC\\([0-9]+\\) SoftReference " + timeRegex + "\n" +
                       ".* GC\\([0-9]+\\) WeakReference " + timeRegex + "\n" +
                       ".* GC\\([0-9]+\\) FinalReference " + timeRegex + "\n" +
                       ".* GC\\([0-9]+\\) PhantomReference " + timeRegex + "\n" +
                       ".* GC\\([0-9]+\\) JNI Weak Reference " + timeRegex + "\n" +
                       ".* GC\\([0-9]+\\) Ref Counts: Soft: [0-9]+ Weak: [0-9]+ Final: [0-9]+ Phantom: [0-9]+\n");
    output.shouldMatch(/* Total Reference processing time */
                       totalRegex +
                       /* SoftReference processing */
                       softRefRegex + balanceRegex + softRefDetailRegex +
                       /* WeakReference processing */
                       weakRefRegex + balanceRegex + refDetailRegex +
                       /* FinalReference processing */
                       finalRefRegex + balanceRegex + refDetailRegex +
                       /* PhantomReference processing */
                       phantomRefRegex + balanceRegex + refDetailRegex +
                       /* Total Enqueuing time */
                       enqueueRegex +
                       /* Enqueued Stats */
                       enqueueDetailRegex
                       );

    output.shouldHaveExitValue(0);
  }

  static class GCTest {
    static final int M = 1024 * 1024;

    public static void main(String [] args) {
      System.gc();

      ArrayList arrSoftRefs = new ArrayList();

      // Populate to trigger GC, and then the reference-related logs will be printed.
      for (int i = 0; i < 10; i++) {
        byte[] tmp = new byte[M];

        arrSoftRefs.add(new SoftReference(tmp));
      }
    }
  }
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,7 +22,7 @@
 */

#include "precompiled.hpp"
#include "gc/g1/workerDataArray.inline.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/resourceArea.hpp"
#include "unittest.hpp"
#include "utilities/ostream.hpp"