Jon Masamitsu 2014-06-23 14:20:20 -07:00
commit 633706fa4e
122 changed files with 635 additions and 286 deletions

View File

@ -37,6 +37,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -24,17 +24,13 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
void OSThread::pd_initialize() {
assert(this != NULL, "check");

View File

@ -48,6 +48,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -41,6 +41,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -41,6 +41,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -23,7 +23,6 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

View File

@ -41,6 +41,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -29,7 +29,6 @@
#error "This file should only be included from thread.inline.hpp"
#endif
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,6 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

View File

@ -44,6 +44,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"

View File

@ -38,6 +38,7 @@
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

View File

@ -47,6 +47,7 @@
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -57,6 +57,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"

View File

@ -33,6 +33,7 @@
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS

View File

@ -33,6 +33,7 @@
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"

View File

@ -37,6 +37,7 @@
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
@ -668,8 +669,10 @@ nmethod::nmethod(
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
if (ScavengeRootsInCode) {
if (detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
@ -753,8 +756,10 @@ nmethod::nmethod(
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
if (ScavengeRootsInCode) {
if (detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
Universe::heap()->register_nmethod(this);
}
DEBUG_ONLY(verify_scavenge_root_oops();)
@ -869,8 +874,10 @@ nmethod::nmethod(
code_buffer->copy_values_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
if (ScavengeRootsInCode) {
if (detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());

View File

@ -36,6 +36,7 @@
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -56,6 +56,7 @@
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -2024,7 +2025,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
"collections passed to foreground collector", _full_gcs_since_conc_gc);
@ -2534,8 +2535,10 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
"VM thread should have CMS token");
// The gc id is created in register_foreground_gc_start if this collection is synchronous
const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
true, NULL);)
true, NULL, gc_id);)
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_begin();
}
@ -3538,6 +3541,7 @@ class CMSPhaseAccounting: public StackObj {
public:
CMSPhaseAccounting(CMSCollector *collector,
const char *phase,
const GCId gc_id,
bool print_cr = true);
~CMSPhaseAccounting();
@ -3546,6 +3550,7 @@ class CMSPhaseAccounting: public StackObj {
const char *_phase;
elapsedTimer _wallclock;
bool _print_cr;
const GCId _gc_id;
public:
// Not MT-safe; so do not pass around these StackObj's
@ -3561,15 +3566,15 @@ class CMSPhaseAccounting: public StackObj {
CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
const char *phase,
const GCId gc_id,
bool print_cr) :
_collector(collector), _phase(phase), _print_cr(print_cr) {
_collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
if (PrintCMSStatistics != 0) {
_collector->resetYields();
}
if (PrintGCDetails) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(_gc_id);
gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
_collector->cmsGen()->short_name(), _phase);
}
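Here and in the hunks below, the date_stamp/stamp pair collapses into a single gclog_stamp(_gc_id) call. The helper itself is outside this excerpt; a plausible sketch of what it wraps, assuming a PrintGCID-style flag gates the id prefix:

// Plausible sketch of outputStream::gclog_stamp (not part of this diff).
void outputStream::gclog_stamp(const GCId& gc_id) {
  date_stamp(PrintGCDateStamps);   // same date stamp as before
  stamp(PrintGCTimeStamps);        // same time stamp as before
  if (PrintGCID) {                 // assumed flag; tags the log line
    print("#%u: ", gc_id.id());    // e.g. "#17: [CMS-concurrent-mark-start]"
  }
}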
@ -3583,8 +3588,7 @@ CMSPhaseAccounting::~CMSPhaseAccounting() {
_collector->stopTimer();
_wallclock.stop();
if (PrintGCDetails) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(_gc_id);
gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
_collector->cmsGen()->short_name(),
_phase, _collector->timerValue(), _wallclock.seconds());
@ -3682,7 +3686,7 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
setup_cms_unloading_and_verification_state();
NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
PrintGCDetails && Verbose, true, _gc_timer_cm);)
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_begin();
}
@ -3799,7 +3803,7 @@ bool CMSCollector::markFromRoots(bool asynch) {
CMSTokenSyncWithLocks ts(true, bitMapLock());
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
res = markFromRootsWork(asynch);
if (res) {
_collectorState = Precleaning;
@ -4522,7 +4526,7 @@ void CMSCollector::preclean() {
_start_sampling = false;
}
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
}
CMSTokenSync x(true); // is cms thread
@ -4551,7 +4555,7 @@ void CMSCollector::abortable_preclean() {
// we will never do an actual abortable preclean cycle.
if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
// We need more smarts in the abortable preclean
// loop below to deal with cases where allocation
// in young gen is very very slow, and our precleaning
@ -4696,7 +4700,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
rp->preclean_discovered_references(
rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
gc_timer);
gc_timer, _gc_tracer_cm->gc_id());
}
if (clean_survivor) { // preclean the active survivor space(s)
@ -5039,7 +5043,7 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
PrintGCDetails && Verbose, true, _gc_timer_cm);)
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
int level = _cmsGen->level() - 1;
if (level >= 0) {
gch->do_collection(true, // full (i.e. force, see below)
@ -5068,7 +5072,7 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
void CMSCollector::checkpointRootsFinalWork(bool asynch,
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@ -5123,11 +5127,11 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
do_remark_parallel();
} else {
GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
_gc_timer_cm);
_gc_timer_cm, _gc_tracer_cm->gc_id());
do_remark_non_parallel();
}
}
@ -5140,7 +5144,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
verify_overflow_empty();
{
NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
refProcessingWork(asynch, clear_all_soft_refs);
}
verify_work_stacks_empty();
@ -5921,7 +5925,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl);
{
GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
@ -5958,7 +5962,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify();
}
{
GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
verify_work_stacks_empty();
@ -5978,7 +5982,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
verify_work_stacks_empty();
@ -5997,7 +6001,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
verify_work_stacks_empty();
@ -6199,7 +6203,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
_span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */);
{
GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
@ -6224,13 +6228,15 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
&task_executor,
_gc_timer_cm);
_gc_timer_cm,
_gc_tracer_cm->gc_id());
} else {
stats = rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
NULL,
_gc_timer_cm);
_gc_timer_cm,
_gc_tracer_cm->gc_id());
}
_gc_tracer_cm->report_gc_reference_stats(stats);
@ -6241,7 +6247,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
if (should_unload_classes()) {
{
GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@ -6254,7 +6260,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
}
{
GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
@ -6263,7 +6269,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// CMS doesn't use the StringTable as hard roots when class unloading is turned off.
// Need to check if we really scanned the StringTable.
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
@ -6330,7 +6336,7 @@ void CMSCollector::sweep(bool asynch) {
_intra_sweep_timer.start();
if (asynch) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
// First sweep the old gen
{
CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@ -6551,7 +6557,7 @@ void CMSCollector::reset(bool asynch) {
// Clear the mark bitmap (no grey objects to start with)
// for the next cycle.
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
HeapWord* curAddr = _markBitMap.startWord();
while (curAddr < _markBitMap.endWord()) {
@ -6617,7 +6623,7 @@ void CMSCollector::reset(bool asynch) {
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
TraceCollectorStats tcs(counters());
switch (op) {

View File

@ -56,7 +56,7 @@ void VM_CMS_Operation::release_and_notify_pending_list_lock() {
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -68,7 +68,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);

View File

@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"
#include "runtime/atomic.inline.hpp"
// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This

View File

@ -45,6 +45,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
@ -511,6 +512,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_has_overflown(false),
_concurrent(false),
_has_aborted(false),
_aborted_gc_id(GCId::undefined()),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
@ -1020,8 +1022,7 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
force_overflow()->update();
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
}
}
@ -2469,7 +2470,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (G1Log::finer()) {
gclog_or_tty->put(' ');
}
GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
ReferenceProcessor* rp = g1h->ref_processor_cm();
@ -2526,7 +2527,8 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
&g1_keep_alive,
&g1_drain_mark_stack,
executor,
g1h->gc_timer_cm());
g1h->gc_timer_cm(),
concurrent_gc_id());
g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack
@ -3261,6 +3263,12 @@ void ConcurrentMark::abort() {
}
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
if (!gc_id.is_undefined()) {
// We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
// to detect that it was aborted. Only keep track of the first GC id that we aborted.
_aborted_gc_id = gc_id;
}
_has_aborted = true;
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@ -3275,6 +3283,13 @@ void ConcurrentMark::abort() {
_g1h->register_concurrent_cycle_end();
}
const GCId& ConcurrentMark::concurrent_gc_id() {
if (has_aborted()) {
return _aborted_gc_id;
}
return _g1h->gc_tracer_cm()->gc_id();
}
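concurrent_gc_id() prefers the id captured at abort time because several full GCs can run before ConcurrentMarkThread::run() notices the abort; after the first abort the concurrent tracer's id is reset to undefined, so later aborts leave the recorded id alone. A standalone analogue of that first-abort-wins behavior (hypothetical names, plain C++):

#include <cassert>

static const unsigned UNDEFINED = ~0u;   // plays the role of GCId::undefined()

struct MarkState {                       // stand-in for ConcurrentMark
  unsigned tracer_id   = UNDEFINED;      // stand-in for gc_tracer_cm()->gc_id()
  unsigned aborted_id  = UNDEFINED;      // stand-in for _aborted_gc_id
  bool     has_aborted = false;

  void abort() {
    if (tracer_id != UNDEFINED) {        // mirrors !gc_id.is_undefined()
      aborted_id = tracer_id;
    }
    has_aborted = true;
  }
  unsigned concurrent_id() const {       // mirrors concurrent_gc_id()
    return has_aborted ? aborted_id : tracer_id;
  }
};

int main() {
  MarkState cm;
  cm.tracer_id = 5;                      // concurrent cycle 5 is being traced
  cm.abort();                            // first full GC aborts marking
  cm.tracer_id = UNDEFINED;              // tracer ended: id reset to undefined
  cm.abort();                            // second full GC before the CM thread ran
  assert(cm.concurrent_id() == 5);       // logs still attribute to cycle 5
}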
static void print_ms_time_info(const char* prefix, const char* name,
NumberSeq& ns) {
gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",

View File

@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"
class G1CollectedHeap;
@ -444,6 +445,7 @@ protected:
volatile bool _concurrent;
// Set at the end of a Full GC so that marking aborts
volatile bool _has_aborted;
GCId _aborted_gc_id;
// Used when remark aborts due to an overflow to indicate that
// another concurrent marking phase should start
@ -824,6 +826,8 @@ public:
bool has_aborted() { return _has_aborted; }
const GCId& concurrent_gc_id();
// This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();)

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"
@ -111,8 +112,7 @@ void ConcurrentMarkThread::run() {
double scan_start = os::elapsedTime();
if (!cm()->has_aborted()) {
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
}
@ -120,8 +120,7 @@ void ConcurrentMarkThread::run() {
double scan_end = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
scan_end - scan_start);
}
@ -129,8 +128,7 @@ void ConcurrentMarkThread::run() {
double mark_start_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-start]");
}
@ -153,8 +151,7 @@ void ConcurrentMarkThread::run() {
}
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
mark_end_sec - mark_start_sec);
}
@ -169,8 +166,7 @@ void ConcurrentMarkThread::run() {
"in remark (restart #%d).", iter);
}
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
}
}
@ -213,8 +209,7 @@ void ConcurrentMarkThread::run() {
double cleanup_start_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
}
@ -234,8 +229,7 @@ void ConcurrentMarkThread::run() {
double cleanup_end_sec = os::elapsedTime();
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
cleanup_end_sec - cleanup_start_sec);
}
@ -276,8 +270,7 @@ void ConcurrentMarkThread::run() {
if (cm()->has_aborted()) {
if (G1Log::fine()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
}
}

View File

@ -26,7 +26,7 @@
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

View File

@ -62,6 +62,7 @@
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
@ -1298,7 +1299,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{
GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@ -3858,8 +3859,7 @@ void G1CollectedHeap::log_gc_header() {
return;
}
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
.append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
@ -5743,7 +5743,8 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
&keep_alive,
&drain_queue,
NULL,
_gc_timer_stw);
_gc_timer_stw,
_gc_tracer_stw->gc_id());
} else {
// Parallel reference processing
assert(rp->num_q() == no_of_gc_workers, "sanity");
@ -5754,7 +5755,8 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
&keep_alive,
&drain_queue,
&par_task_executor,
_gc_timer_stw);
_gc_timer_stw,
_gc_tracer_stw->gc_id());
}
_gc_tracer_stw->report_gc_reference_stats(stats);
@ -6993,7 +6995,7 @@ public:
return;
}
if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
if (ScavengeRootsInCode) {
_g1h->register_nmethod(nm);
}
}

View File

@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "runtime/atomic.inline.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

View File

@ -28,7 +28,7 @@
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
_g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

View File

@ -44,6 +44,7 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
@ -123,7 +124,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace(" 1");
SharedHeap* sh = SharedHeap::heap();
@ -146,7 +147,8 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
&GenMarkSweep::keep_alive,
&GenMarkSweep::follow_stack_closure,
NULL,
gc_timer());
gc_timer(),
gc_tracer()->gc_id());
gc_tracer()->report_gc_reference_stats(stats);
@ -260,7 +262,7 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("2");
// find the first region
@ -297,7 +299,7 @@ void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("3");
SharedHeap* sh = SharedHeap::heap();
@ -358,7 +360,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
GenMarkSweep::trace("4");
G1SpaceCompactClosure blk;

View File

@ -26,6 +26,7 @@
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

View File

@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1StringDedupStat.hpp"
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "gc_implementation/g1/g1StringDedupThread.hpp"
#include "runtime/atomic.inline.hpp"
bool G1StringDedup::_enabled = false;
@ -211,3 +212,16 @@ G1StringDedupUnlinkOrOopsDoClosure::~G1StringDedupUnlinkOrOopsDoClosure() {
G1StringDedupTable::finish_rehash(_rehashed_table);
}
}
// Atomically claims the next available queue for exclusive access by
// the current thread. Returns the queue number of the claimed queue.
size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
}
// Atomically claims the next available table partition for exclusive
// access by the current thread. Returns the table bucket number where
// the claimed partition starts.
size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
}
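Both claims rely on an atomic post-increment: Atomic::add_ptr returns the new value, so subtracting the addend recovers the index or bucket this thread just claimed. A standalone analogue using std::atomic, whose fetch_add returns the old value directly:

#include <atomic>
#include <cassert>

static std::atomic<size_t> next_queue{0};
static std::atomic<size_t> next_bucket{0};

// fetch_add returns the value *before* the add -- exactly the claimed index.
// (Atomic::add_ptr returns the value *after* it, hence the "- 1" above.)
size_t claim_queue() {
  return next_queue.fetch_add(1);
}

// Claims partition_size consecutive buckets; the old value is where they start.
size_t claim_table_partition(size_t partition_size) {
  return next_bucket.fetch_add(partition_size);
}

int main() {
  assert(claim_queue() == 0);
  assert(claim_queue() == 1);               // each caller gets a unique queue
  assert(claim_table_partition(16) == 0);   // buckets [0, 16)
  assert(claim_table_partition(16) == 16);  // buckets [16, 32)
}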

View File

@ -84,6 +84,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
class OopClosure;
class BoolObjectClosure;
@ -174,16 +175,12 @@ public:
// Atomically claims the next available queue for exclusive access by
// the current thread. Returns the queue number of the claimed queue.
size_t claim_queue() {
return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
}
size_t claim_queue();
// Atomically claims the next available table partition for exclusive
// access by the current thread. Returns the table bucket number where
// the claimed partition starts.
size_t claim_table_partition(size_t partition_size) {
return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
}
size_t claim_table_partition(size_t partition_size);
// Applies and returns the result from the is_alive closure, or
// returns true if no such closure was provided.

View File

@ -26,6 +26,7 @@
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "memory/gcLocker.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/stack.inline.hpp"

View File

@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1StringDedupTable.hpp"
#include "gc_implementation/g1/g1StringDedupThread.hpp"
#include "gc_implementation/g1/g1StringDedupQueue.hpp"
#include "runtime/atomic.inline.hpp"
G1StringDedupThread* G1StringDedupThread::_thread = NULL;

View File

@ -34,6 +34,7 @@
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

View File

@ -32,6 +32,7 @@
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

View File

@ -29,6 +29,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/space.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#define SPARSE_PRT_VERBOSE 0

View File

@ -226,7 +226,7 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
SharedHeap* sh = SharedHeap::heap();
// This could go away if CollectedHeap gave access to _gc_is_active...
if (sh != NULL) {

View File

@ -47,6 +47,7 @@
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -955,7 +956,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin();
}
GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
// Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used();
@ -1013,14 +1014,14 @@ void ParNewGeneration::collect(bool full,
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, &task_executor,
_gc_timer);
_gc_timer, gc_tracer.gc_id());
} else {
thread_state_set.flush();
gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks();
stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, NULL,
_gc_timer);
_gc_timer, gc_tracer.gc_id());
}
gc_tracer.report_gc_reference_stats(stats);
if (!promotion_failed()) {

View File

@ -26,6 +26,7 @@
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/bitMap.inline.hpp"
#include "services/memTracker.hpp"

View File

@ -53,7 +53,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
ResourceMark rm;
NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@ -82,7 +82,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -153,7 +153,7 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("RefProcTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -209,7 +209,7 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@ -242,7 +242,7 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@ -309,7 +309,7 @@ UpdateDensePrefixTask::UpdateDensePrefixTask(
void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@ -324,7 +324,7 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);

View File

@ -34,6 +34,7 @@
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/stack.inline.hpp"
PSOldGen* ParCompactionManager::_old_gen = NULL;

View File

@ -170,7 +170,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@ -521,7 +521,7 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace(" 1");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@ -556,7 +556,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
gc_tracer()->report_gc_reference_stats(stats);
}
@ -582,7 +582,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
void PSMarkSweep::mark_sweep_phase2() {
GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("2");
// Now all live objects are marked, compute the new object addresses.
@ -612,7 +612,7 @@ static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("3");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@ -655,7 +655,7 @@ void PSMarkSweep::mark_sweep_phase3() {
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("4");
// All pointers are now adjusted, move objects accordingly

View File

@ -51,6 +51,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
@ -979,7 +980,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of minor
// collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap* heap = gc_heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
@ -1022,7 +1023,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
void PSParallelCompact::post_compact()
{
GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
// Clear the marking bitmap, summary data and split info.
@ -1848,7 +1849,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
// trace("2");
#ifdef ASSERT
@ -2057,7 +2058,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@ -2359,7 +2360,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap* heap = gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
@ -2374,7 +2375,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ClassLoaderDataGraph::clear_claimed_marks();
{
GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap::ParStrongRootsScope psrs;
@ -2403,24 +2404,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Process reference objects found during marking
{
GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ReferenceProcessorStats stats;
if (ref_processor()->processing_is_mt()) {
RefProcTaskExecutor task_executor;
stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
&task_executor, &_gc_timer);
&task_executor, &_gc_timer, _gc_tracer.gc_id());
} else {
stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
&_gc_timer);
&_gc_timer, _gc_tracer.gc_id());
}
gc_tracer->report_gc_reference_stats(stats);
}
GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
// This is the point where the entire marking should have completed.
assert(cm->marking_stacks_empty(), "Marking should have completed");
@ -2459,7 +2460,7 @@ static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
@ -2495,7 +2496,7 @@ void PSParallelCompact::adjust_roots() {
void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
uint parallel_gc_threads)
{
GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
// Find the threads that are active
unsigned int which = 0;
@ -2569,7 +2570,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
uint parallel_gc_threads) {
GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelCompactData& sd = PSParallelCompact::summary_data();
@ -2651,7 +2652,7 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
GCTaskQueue* q,
ParallelTaskTerminator* terminator_ptr,
uint parallel_gc_threads) {
GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
// Once a thread has drained its stack, it should try to steal regions from
// other threads.
@ -2699,7 +2700,7 @@ void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
void PSParallelCompact::compact() {
// trace("5");
GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@ -2716,7 +2717,7 @@ void PSParallelCompact::compact() {
enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
{
GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
gc_task_manager()->execute_and_wait(q);
@ -2730,7 +2731,7 @@ void PSParallelCompact::compact() {
{
// Update the deferred objects, if any. Any compaction manager can be used.
GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
ParCompactionManager* cm = ParCompactionManager::manager_array(0);
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
update_deferred_objects(cm, SpaceId(id));

View File

@ -1004,6 +1004,10 @@ class PSParallelCompact : AllStatic {
static bool _dwl_initialized;
#endif // #ifdef ASSERT
public:
static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
private:
static void initialize_space_info();

View File

@ -332,7 +332,7 @@ bool PSScavenge::invoke_no_policy() {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
@ -398,7 +398,7 @@ bool PSScavenge::invoke_no_policy() {
// We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
GCTraceTime tm("Scavenge", false, false, &_gc_timer);
GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create();
@ -440,7 +440,7 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge
{
GCTraceTime tm("References", false, false, &_gc_timer);
GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());
reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers);
@ -451,10 +451,10 @@ bool PSScavenge::invoke_no_policy() {
PSRefProcTaskExecutor task_executor;
stats = reference_processor()->process_discovered_references(
&_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
&_gc_timer);
&_gc_timer, _gc_tracer.gc_id());
} else {
stats = reference_processor()->process_discovered_references(
&_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
&_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
}
_gc_tracer.report_gc_reference_stats(stats);
@ -469,7 +469,7 @@ bool PSScavenge::invoke_no_policy() {
}
{
GCTraceTime tm("StringTable", false, false, &_gc_timer);
GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
// Unlink any dead interned Strings and process the remaining live ones.
PSScavengeRootsClosure root_closure(promotion_manager);
StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
@ -641,7 +641,7 @@ bool PSScavenge::invoke_no_policy() {
NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
{
GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
CodeCache::prune_scavenge_root_nmethods();
}

View File

@ -28,6 +28,7 @@
#include "memory/collectorPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/copy.hpp"
/* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "runtime/safepoint.hpp"
uint GCId::_next_id = 0;
const GCId GCId::create() {
return GCId(_next_id++);
}
const GCId GCId::peek() {
return GCId(_next_id);
}
const GCId GCId::undefined() {
return GCId(UNDEFINED);
}
bool GCId::is_undefined() const {
return _id == UNDEFINED;
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
#include "memory/allocation.hpp"
class GCId VALUE_OBJ_CLASS_SPEC {
private:
uint _id;
GCId(uint id) : _id(id) {}
GCId() { } // Unused
static uint _next_id;
static const uint UNDEFINED = (uint)-1;
public:
uint id() const {
assert(_id != UNDEFINED, "Using undefined GC ID");
return _id;
}
bool is_undefined() const;
static const GCId create();
static const GCId peek();
static const GCId undefined();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
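A short usage sketch of the new class (hypothetical call site, assuming HotSpot's assert macro is in scope): create() hands out and consumes the next id, peek() previews it without consuming — which is how the CMS foreground path above obtains the id before the tracer has started — and undefined() is the sentinel tested with is_undefined().

#include "gc_implementation/shared/gcId.hpp"

void gc_id_usage_sketch() {                 // hypothetical illustration only
  GCId none = GCId::undefined();
  assert(none.is_undefined(), "sentinel carries no usable id");

  GCId first   = GCId::create();            // consumes the next id
  GCId preview = GCId::peek();              // looks at the next id, no consume
  GCId second  = GCId::create();
  assert(first.id() + 1 == second.id(), "ids are handed out sequentially");
  assert(preview.id() == second.id(), "peek() must not advance _next_id");
}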

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/objectCountEventSender.hpp"
@ -38,19 +39,14 @@
#include "gc_implementation/g1/evacuationInfo.hpp"
#endif
#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
static GCId GCTracer_next_gc_id = 0;
static GCId create_new_gc_id() {
return GCTracer_next_gc_id++;
}
#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
assert_unset_gc_id();
GCId gc_id = create_new_gc_id();
_shared_gc_info.set_id(gc_id);
GCId gc_id = GCId::create();
_shared_gc_info.set_gc_id(gc_id);
_shared_gc_info.set_cause(cause);
_shared_gc_info.set_start_timestamp(timestamp);
}
@ -62,7 +58,7 @@ void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
}
bool GCTracer::has_reported_gc_start() const {
return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
return !_shared_gc_info.gc_id().is_undefined();
}
void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
@ -81,7 +77,7 @@ void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partit
report_gc_end_impl(timestamp, time_partitions);
_shared_gc_info.set_id(SharedGCInfo::UNSET_GCID);
_shared_gc_info.set_gc_id(GCId::undefined());
}
void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
@ -132,7 +128,7 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
if (!cit.allocation_failed()) {
HeapInspection hi(false, false, false, NULL);
hi.populate_table(&cit, is_alive_cl);
ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), Ticks::now());
ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
cit.iterate(&event_sender);
}
}

View File

@ -27,6 +27,7 @@
#include "gc_interface/gcCause.hpp"
#include "gc_interface/gcName.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
@ -38,7 +39,6 @@
#include "utilities/macros.hpp"
#include "utilities/ticks.hpp"
typedef uint GCId;
class EvacuationInfo;
class GCHeapSummary;
@ -50,11 +50,8 @@ class TimePartitions;
class BoolObjectClosure;
class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
public:
static const GCId UNSET_GCID = (GCId)-1;
private:
GCId _id;
GCId _gc_id;
GCName _name;
GCCause::Cause _cause;
Ticks _start_timestamp;
@ -64,7 +61,7 @@ class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
public:
SharedGCInfo(GCName name) :
_id(UNSET_GCID),
_gc_id(GCId::undefined()),
_name(name),
_cause(GCCause::_last_gc_cause),
_start_timestamp(),
@ -73,8 +70,8 @@ class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
_longest_pause() {
}
void set_id(GCId id) { _id = id; }
GCId id() const { return _id; }
void set_gc_id(GCId gc_id) { _gc_id = gc_id; }
const GCId& gc_id() const { return _gc_id; }
void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; }
const Ticks start_timestamp() const { return _start_timestamp; }
@ -131,10 +128,11 @@ class GCTracer : public ResourceObj {
void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
bool has_reported_gc_start() const;
const GCId& gc_id() { return _shared_gc_info.gc_id(); }
protected:
GCTracer(GCName name) : _shared_gc_info(name) {}
virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
private:

View File

@ -43,7 +43,7 @@ typedef uintptr_t TraceAddress;
void GCTracer::send_garbage_collection_event() const {
EventGCGarbageCollection event(UNTIMED);
if (event.should_commit()) {
event.set_gcId(_shared_gc_info.id());
event.set_gcId(_shared_gc_info.gc_id().id());
event.set_name(_shared_gc_info.name());
event.set_cause((u2) _shared_gc_info.cause());
event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
@ -57,7 +57,7 @@ void GCTracer::send_garbage_collection_event() const {
void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
EventGCReferenceStatistics e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_type((u1)type);
e.set_count(count);
e.commit();
@ -68,7 +68,7 @@ void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspa
const MetaspaceChunkFreeListSummary& summary) const {
EventMetaspaceChunkFreeListSummary e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_when(when);
e.set_metadataType(mdtype);
@ -91,7 +91,7 @@ void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspa
void ParallelOldTracer::send_parallel_old_event() const {
EventGCParallelOld e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp());
@ -102,7 +102,7 @@ void ParallelOldTracer::send_parallel_old_event() const {
void YoungGCTracer::send_young_gc_event() const {
EventGCYoungGarbageCollection e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_tenuringThreshold(_tenuring_threshold);
e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp());
@ -113,7 +113,7 @@ void YoungGCTracer::send_young_gc_event() const {
void OldGCTracer::send_old_gc_event() const {
EventGCOldGarbageCollection e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp());
e.commit();
@ -132,7 +132,7 @@ static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
EventPromotionFailed e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_data(to_trace_struct(pf_info));
e.set_thread(pf_info.thread()->thread_id());
e.commit();
@ -143,7 +143,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_in
void OldGCTracer::send_concurrent_mode_failure_event() {
EventConcurrentModeFailure e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.commit();
}
}
@ -152,7 +152,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() {
void G1NewTracer::send_g1_young_gc_event() {
EventGCG1GarbageCollection e(UNTIMED);
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_type(_g1_young_gc_info.type());
e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp());
@ -163,7 +163,7 @@ void G1NewTracer::send_g1_young_gc_event() {
void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
EventEvacuationInfo e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_cSetRegions(info->collectionset_regions());
e.set_cSetUsedBefore(info->collectionset_used_before());
e.set_cSetUsedAfter(info->collectionset_used_after());
@ -179,7 +179,7 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
EventEvacuationFailed e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_data(to_trace_struct(ef_info));
e.commit();
}
@ -206,17 +206,17 @@ static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
}
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
GCId _id;
GCId _gc_id;
GCWhen::Type _when;
public:
GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {}
GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {}
void visit(const GCHeapSummary* heap_summary) const {
const VirtualSpaceSummary& heap_space = heap_summary->heap();
EventGCHeapSummary e;
if (e.should_commit()) {
e.set_gcId(_id);
e.set_gcId(_gc_id.id());
e.set_when((u1)_when);
e.set_heapSpace(to_trace_struct(heap_space));
e.set_heapUsed(heap_summary->used());
@ -236,7 +236,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
EventPSHeapSummary e;
if (e.should_commit()) {
e.set_gcId(_id);
e.set_gcId(_gc_id.id());
e.set_when((u1)_when);
e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
@ -251,7 +251,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
};
void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when);
GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when);
heap_summary.accept(&visitor);
}
@ -268,7 +268,7 @@ static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
EventMetaspaceSummary e;
if (e.should_commit()) {
e.set_gcId(_shared_gc_info.id());
e.set_gcId(_shared_gc_info.gc_id().id());
e.set_when((u1) when);
e.set_gcThreshold(meta_space_summary.capacity_until_GC());
e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
@ -287,7 +287,7 @@ class PhaseSender : public PhaseVisitor {
void send_phase(PausePhase* pause) {
T event(UNTIMED);
if (event.should_commit()) {
event.set_gcId(_gc_id);
event.set_gcId(_gc_id.id());
event.set_name(pause->name());
event.set_starttime(pause->start());
event.set_endtime(pause->end());
@ -311,7 +311,7 @@ class PhaseSender : public PhaseVisitor {
};
void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
PhaseSender phase_reporter(_shared_gc_info.id());
PhaseSender phase_reporter(_shared_gc_info.gc_id());
TimePartitionPhasesIterator iter(time_partitions);
while (iter.has_next()) {

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
@ -34,7 +35,7 @@
#include "utilities/ticks.inline.hpp"
GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) :
_title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
if (_doit || _timer != NULL) {
_start_counter.stamp();
@ -52,6 +53,9 @@ GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* t
gclog_or_tty->stamp();
gclog_or_tty->print(": ");
}
if (PrintGCID) {
gclog_or_tty->print("#%u: ", gc_id.id());
}
gclog_or_tty->print("[%s", title);
gclog_or_tty->flush();
}
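With the new PrintGCID flag enabled (it defaults to true, see the globals.hpp hunk further down), the constructor above prefixes each trace line with "#<id>: ". An illustrative, not verbatim, example of the resulting -verbose:gc output:

#0: [GC (Allocation Failure)  8192K->1024K(31744K), 0.0043079 secs]
#1: [Full GC (System.gc())  1024K->512K(31744K), 0.0103089 secs]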

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
#include "gc_implementation/shared/gcTrace.hpp"
#include "prims/jni_md.h"
#include "utilities/ticks.hpp"
@ -38,7 +39,7 @@ class GCTraceTime {
Ticks _start_counter;
public:
GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id);
~GCTraceTime();
};

View File

@ -28,6 +28,7 @@
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shared/mutableSpace.hpp"

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "gc_implementation/shared/objectCountEventSender.hpp"
#include "memory/heapInspection.hpp"
#include "trace/tracing.hpp"
@ -38,7 +39,7 @@ void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const
"Only call this method if the event is enabled");
EventObjectCountAfterGC event(UNTIMED);
event.set_gcId(gc_id);
event.set_gcId(gc_id.id());
event.set_class(entry->klass());
event.set_count(entry->count());
event.set_totalSize(entry->words() * BytesPerWord);

View File

@ -558,13 +558,13 @@ void CollectedHeap::resize_all_tlabs() {
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
if (HeapDumpBeforeFullGC) {
GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
// We are doing a "major" collection and a heap dump before
// major collection has been requested.
HeapDumper::dump_heap();
}
if (PrintClassHistogramBeforeFullGC) {
GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit();
}
@ -572,11 +572,11 @@ void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
if (HeapDumpAfterFullGC) {
GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
HeapDumper::dump_heap();
}
if (PrintClassHistogramAfterFullGC) {
GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit();
}

View File

@ -37,6 +37,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -42,6 +42,7 @@
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"

View File

@ -29,7 +29,7 @@
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"

View File

@ -29,6 +29,7 @@
#include "memory/generation.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

View File

@ -41,6 +41,7 @@
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
@ -585,7 +586,7 @@ void DefNewGeneration::collect(bool full,
init_assuming_no_promotion_failure();
GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
// Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used();
@ -641,7 +642,7 @@ void DefNewGeneration::collect(bool full,
rp->setup_policy(clear_all_soft_refs);
const ReferenceProcessorStats& stats =
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
NULL, _gc_timer);
NULL, _gc_timer, gc_tracer.gc_id());
gc_tracer.report_gc_reference_stats(stats);
if (!_promotion_failed) {

View File

@ -26,6 +26,7 @@
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
volatile jint GC_locker::_jni_lock_count = 0;
@ -59,6 +60,17 @@ void GC_locker::verify_critical_count() {
assert(_jni_lock_count == count, "must be equal");
}
}
// In debug mode track the locking state at all times
void GC_locker::increment_debug_jni_lock_count() {
assert(_debug_jni_lock_count >= 0, "bad value");
Atomic::inc(&_debug_jni_lock_count);
}
void GC_locker::decrement_debug_jni_lock_count() {
assert(_debug_jni_lock_count > 0, "bad value");
Atomic::dec(&_debug_jni_lock_count);
}
#endif
bool GC_locker::check_active_before_gc() {

View File

@ -94,18 +94,8 @@ class GC_locker: public AllStatic {
}
// In debug mode track the locking state at all times
static void increment_debug_jni_lock_count() {
#ifdef ASSERT
assert(_debug_jni_lock_count >= 0, "bad value");
Atomic::inc(&_debug_jni_lock_count);
#endif
}
static void decrement_debug_jni_lock_count() {
#ifdef ASSERT
assert(_debug_jni_lock_count > 0, "bad value");
Atomic::dec(&_debug_jni_lock_count);
#endif
}
static void increment_debug_jni_lock_count() NOT_DEBUG_RETURN;
static void decrement_debug_jni_lock_count() NOT_DEBUG_RETURN;
// Set the current lock count
static void set_jni_lock_count(int count) {
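The NOT_DEBUG_RETURN macro used here follows the usual HotSpot pattern: in debug builds it expands to nothing, so the out-of-line definitions added in gcLocker.cpp above take effect; in product builds it supplies an empty body and the calls do nothing. Roughly (simplified sketch of utilities/macros.hpp, not part of this change):

#ifdef ASSERT
  #define NOT_DEBUG_RETURN   /* next token must be ; */
#else
  #define NOT_DEBUG_RETURN   {}
#endif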

View File

@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
@ -384,7 +385,9 @@ void GenCollectedHeap::do_collection(bool full,
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
gc_prologue(complete);
increment_total_collections(complete);
@ -417,7 +420,9 @@ void GenCollectedHeap::do_collection(bool full,
}
// Timer for individual generations. Last argument is false: no CR
// FIXME: We should try to start the timing earlier to cover more of the GC pause
GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
TraceCollectorStats tcs(_gens[i]->counters());
TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());

View File

@ -69,7 +69,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool c
_ref_processor = rp;
rp->setup_policy(clear_all_softrefs);
GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
gch->trace_heap_before_gc(_gc_tracer);
@ -193,7 +193,7 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace(" 1");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -220,7 +220,7 @@ void GenMarkSweep::mark_sweep_phase1(int level,
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
&is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
&is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
gc_tracer()->report_gc_reference_stats(stats);
}
@ -262,7 +262,7 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("2");
gch->prepare_for_compaction();
@ -279,7 +279,7 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("3");
// Need new claim bits for the pointer adjustment tracing.
@ -327,7 +327,7 @@ void GenMarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("4");
GenCompactClosure blk;

View File

@ -190,7 +190,8 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
OopClosure* keep_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
GCTimer* gc_timer) {
GCTimer* gc_timer,
GCId gc_id) {
NOT_PRODUCT(verify_ok_to_handle_reflists());
assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
@ -212,7 +213,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Soft references
size_t soft_count = 0;
{
GCTraceTime tt("SoftReference", trace_time, false, gc_timer);
GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id);
soft_count =
process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
is_alive, keep_alive, complete_gc, task_executor);
@ -223,7 +224,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Weak references
size_t weak_count = 0;
{
GCTraceTime tt("WeakReference", trace_time, false, gc_timer);
GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id);
weak_count =
process_discovered_reflist(_discoveredWeakRefs, NULL, true,
is_alive, keep_alive, complete_gc, task_executor);
@ -232,7 +233,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Final references
size_t final_count = 0;
{
GCTraceTime tt("FinalReference", trace_time, false, gc_timer);
GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id);
final_count =
process_discovered_reflist(_discoveredFinalRefs, NULL, false,
is_alive, keep_alive, complete_gc, task_executor);
@ -241,7 +242,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Phantom references
size_t phantom_count = 0;
{
GCTraceTime tt("PhantomReference", trace_time, false, gc_timer);
GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id);
phantom_count =
process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
is_alive, keep_alive, complete_gc, task_executor);
@ -253,7 +254,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// thus use JNI weak references to circumvent the phantom references and
// resurrect a "post-mortem" object.
{
GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
if (task_executor != NULL) {
task_executor->set_single_threaded_mode();
}
@ -1251,14 +1252,15 @@ void ReferenceProcessor::preclean_discovered_references(
OopClosure* keep_alive,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer) {
GCTimer* gc_timer,
GCId gc_id) {
NOT_PRODUCT(verify_ok_to_handle_reflists());
// Soft references
{
GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer);
false, gc_timer, gc_id);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@ -1271,7 +1273,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Weak references
{
GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer);
false, gc_timer, gc_id);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@ -1284,7 +1286,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Final references
{
GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer);
false, gc_timer, gc_id);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@ -1297,7 +1299,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Phantom references
{
GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer);
false, gc_timer, gc_id);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
@ -349,7 +350,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
OopClosure* keep_alive,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer);
GCTimer* gc_timer,
GCId gc_id);
// Delete entries in the discovered lists that have
// either a null referent or are not active. Such
@ -480,7 +482,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
OopClosure* keep_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
GCTimer *gc_timer);
GCTimer *gc_timer,
GCId gc_id);
// Enqueue references at end of GC (called by the garbage collector)
bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

View File

@ -37,6 +37,7 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"

View File

@ -25,7 +25,6 @@
#ifndef SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP
#define SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"

View File

@ -27,7 +27,6 @@
#include "gc_interface/collectedHeap.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"

View File

@ -53,6 +53,7 @@
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -27,11 +27,27 @@
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/atomic.inline.hpp"
volatile int CompiledICHolder::_live_count;
volatile int CompiledICHolder::_live_not_claimed_count;
CompiledICHolder::CompiledICHolder(Method* method, Klass* klass)
: _holder_method(method), _holder_klass(klass) {
#ifdef ASSERT
Atomic::inc(&_live_count);
Atomic::inc(&_live_not_claimed_count);
#endif // ASSERT
}
#ifdef ASSERT
CompiledICHolder::~CompiledICHolder() {
assert(_live_count > 0, "underflow");
Atomic::dec(&_live_count);
}
#endif // ASSERT
// Printing
void CompiledICHolder::print_on(outputStream* st) const {
@ -51,3 +67,11 @@ void CompiledICHolder::verify_on(outputStream* st) {
guarantee(holder_method()->is_method(), "should be method");
guarantee(holder_klass()->is_klass(), "should be klass");
}
#ifdef ASSERT
void CompiledICHolder::claim() {
Atomic::dec(&_live_not_claimed_count);
}
#endif // ASSERT

View File

@ -26,6 +26,7 @@
#define SHARE_VM_OOPS_COMPILEDICHOLDEROOP_HPP
#include "oops/oop.hpp"
#include "utilities/macros.hpp"
// A CompiledICHolder* is a helper object for the inline cache implementation.
// It holds an intermediate value (method+klass pair) used when converting from
@ -50,20 +51,8 @@ class CompiledICHolder : public CHeapObj<mtCompiler> {
public:
// Constructor
CompiledICHolder(Method* method, Klass* klass)
: _holder_method(method), _holder_klass(klass) {
#ifdef ASSERT
Atomic::inc(&_live_count);
Atomic::inc(&_live_not_claimed_count);
#endif
}
~CompiledICHolder() {
#ifdef ASSERT
assert(_live_count > 0, "underflow");
Atomic::dec(&_live_count);
#endif
}
CompiledICHolder(Method* method, Klass* klass);
~CompiledICHolder() NOT_DEBUG_RETURN;
static int live_count() { return _live_count; }
static int live_not_claimed_count() { return _live_not_claimed_count; }
@ -91,11 +80,7 @@ class CompiledICHolder : public CHeapObj<mtCompiler> {
const char* internal_name() const { return "{compiledICHolder}"; }
void claim() {
#ifdef ASSERT
Atomic::dec(&_live_not_claimed_count);
#endif
}
void claim() NOT_DEBUG_RETURN;
};
#endif // SHARE_VM_OOPS_COMPILEDICHOLDEROOP_HPP

View File

@ -32,6 +32,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/macros.hpp"

View File

@ -51,6 +51,7 @@
#include "prims/jvmtiRedefineClasses.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"

View File

@ -32,7 +32,6 @@
#include "oops/fieldInfo.hpp"
#include "oops/instanceOop.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/os.hpp"
#include "utilities/accessFlags.hpp"

View File

@ -26,6 +26,7 @@
#define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
#include "utilities/macros.hpp"
#include "runtime/atomic.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"

View File

@ -26,11 +26,11 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
Symbol::Symbol(const u1* name, int length, int refcount) {
_refcount = refcount;

View File

@ -36,7 +36,6 @@
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"

View File

@ -55,6 +55,7 @@
#include "opto/mulnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"

View File

@ -59,6 +59,7 @@
#include "prims/jvm_misc.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fprofiler.hpp"

View File

@ -44,6 +44,7 @@
#include "prims/nativeLookup.hpp"
#include "prims/privilegedStack.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/dtraceJSDT.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@ -55,6 +56,7 @@
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/reflection.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vm_operations.hpp"
#include "services/attachListener.hpp"

View File

@ -32,7 +32,7 @@
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

View File

@ -31,6 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "prims/jni.h"
#include "prims/jvm.h"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/prefetch.inline.hpp"

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"

View File

@ -25,6 +25,9 @@
#ifndef SHARE_VM_RUNTIME_EXTENDEDPC_HPP
#define SHARE_VM_RUNTIME_EXTENDEDPC_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// An ExtendedPC contains the _pc from a signal handler in a platform
// independent way.

View File

@ -44,6 +44,7 @@
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/decoder.hpp"
#ifdef TARGET_ARCH_x86

View File

@ -2299,6 +2299,9 @@ class CommandLineFlags {
manageable(bool, PrintGCTimeStamps, false, \
"Print timestamps at garbage collection") \
\
manageable(bool, PrintGCID, true, \
"Print an identifier for each garbage collection") \
\
product(bool, PrintGCTaskTimeStamps, false, \
"Print timestamps for individual gc worker thread tasks") \
\
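Since the flag is declared manageable, it should also be writable in a running VM, for example via jinfo (the pid below is hypothetical):

jinfo -flag -PrintGCID 12345   # turn GC id prefixes off at runtime
jinfo -flag +PrintGCID 12345   # turn them back on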

View File

@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/thread.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux

View File

@ -28,6 +28,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"

View File

@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"

View File

@ -27,6 +27,7 @@
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"

View File

@ -40,6 +40,7 @@
#include "prims/jvm_misc.hpp"
#include "prims/privilegedStack.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"

View File

@ -26,7 +26,6 @@
#define SHARE_VM_RUNTIME_OS_HPP
#include "jvmtifiles/jvmti.h"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/handles.hpp"
#include "utilities/top.hpp"

View File

@ -32,10 +32,12 @@
#include "code/scopeDesc.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"

View File

@ -42,6 +42,7 @@
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"

Some files were not shown because too many files have changed in this diff.