6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent

Make JvmtiGCMark safe to run non-safepoint and instrument CMS

Reviewed-by: ysr, dcubed
Author: Keith McGuigan  2011-01-10 17:14:53 -05:00
parent 6215ab50b3
commit ae65c6240f
16 changed files with 157 additions and 456 deletions
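
In short: each stop-the-world VM operation used to pick a JVMTI marker class by GC cause and bracket itself with DTrace notify calls by hand, and the marker assumed it always ran at a safepoint. Now a single stack-allocated SvcGCMarker does both jobs. A condensed before/after sketch, distilled from the vmGCOperations and vmPSOperations hunks below (not a verbatim copy of either):

    // Before: one marker class per GC cause, plus explicit DTrace probes.
    void VM_GenCollectFull::doit() {
      JvmtiGCFullMarker jgcm;        // posts JVMTI GC start/finish events
      notify_gc_begin(true);         // DTrace hotspot gc__begin probe
      // ... do the collection ...
      notify_gc_end();               // DTrace hotspot gc__end probe
    }

    // After: one marker covers both, and JvmtiGCMarker itself checks
    // SafepointSynchronize::is_at_safepoint() before safepoint-only work.
    void VM_GenCollectFull::doit() {
      SvcGCMarker sgcm(SvcGCMarker::FULL);
      // ... do the collection ...
    }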

@@ -3478,6 +3478,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState);
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
@@ -5940,11 +5941,6 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   }
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
-
-  // JVMTI object tagging is based on JNI weak refs. If any of these
-  // refs were cleared then JVMTI needs to update its maps and
-  // maybe post ObjectFrees to agents.
-  JvmtiExport::cms_ref_processing_epilogue();
 }

 #ifndef PRODUCT
@@ -6305,6 +6301,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial(true);       // asynch
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
@@ -6312,6 +6309,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op) {
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal(true,    // asynch
                            false,   // !clear_all_soft_refs
                            false);  // !init_mark_was_synchronous

@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/vmGCOperations.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1142,6 +1143,8 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }

+  SvcGCMarker sgcm(SvcGCMarker::OTHER);
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");

@@ -1192,7 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }

-  DTraceGCProbeMarker gc_probe_marker(true /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;

   if (PrintHeapAtGC) {
@@ -3214,7 +3214,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }

-  DTraceGCProbeMarker gc_probe_marker(false /* full */);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;

   if (PrintHeapAtGC) {

@@ -38,7 +38,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 }

 void VM_G1CollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
@@ -46,7 +45,6 @@ void VM_G1CollectForAllocation::doit() {
 }

 void VM_G1CollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
@@ -72,7 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
 }

 void VM_G1IncCollectionPause::doit() {
-  JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
       ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||

@@ -42,8 +42,7 @@ VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
 }

 void VM_ParallelGCFailedAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);

   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -54,8 +53,6 @@ void VM_ParallelGCFailedAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-
-  notify_gc_end();
 }

 VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(size_t size,
@@ -67,8 +64,7 @@ VM_ParallelGCFailedPermanentAllocation::VM_ParallelGCFailedPermanentAllocation(s
 }

 void VM_ParallelGCFailedPermanentAllocation::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);

   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
@@ -78,7 +74,6 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }

 // Only used for System.gc() calls
@@ -91,8 +86,7 @@ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
 }

 void VM_ParallelGCSystemGC::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);

   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
@@ -106,5 +100,4 @@ void VM_ParallelGCSystemGC::doit() {
   } else {
     heap->invoke_full_gc(false);
   }
-  notify_gc_end();
 }

@@ -31,7 +31,6 @@
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
-#include "prims/jvmtiExport.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -40,6 +39,7 @@
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif

 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
@@ -158,8 +158,7 @@ void VM_GC_HeapInspection::doit() {
 }

 void VM_GenCollectForAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(false);
+  SvcGCMarker sgcm(SvcGCMarker::MINOR);

   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
@@ -169,22 +168,19 @@ void VM_GenCollectForAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }

 void VM_GenCollectFull::doit() {
-  JvmtiGCFullMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);

   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
-  notify_gc_end();
 }

 void VM_GenCollectForPermanentAllocation::doit() {
-  JvmtiGCForAllocationMarker jgcm;
-  notify_gc_begin(true);
+  SvcGCMarker sgcm(SvcGCMarker::FULL);

   SharedHeap* heap = (SharedHeap*)Universe::heap();
   GCCauseSetter gccs(heap, _gc_cause);
   switch (heap->kind()) {
@@ -209,5 +205,4 @@ void VM_GenCollectForPermanentAllocation::doit() {
   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
-  notify_gc_end();
 }

@@ -30,6 +30,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/vm_operations.hpp"
+#include "prims/jvmtiExport.hpp"

 // The following class hierarchy represents
 // a set of operations (VM_Operation) related to GC.
@@ -209,13 +210,17 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const       { return _res; }
 };

-class DTraceGCProbeMarker : public StackObj {
+class SvcGCMarker : public StackObj {
+ private:
+  JvmtiGCMarker _jgcm;
  public:
-  DTraceGCProbeMarker(bool full) {
-    VM_GC_Operation::notify_gc_begin(full);
+  typedef enum { MINOR, FULL, OTHER } reason_type;
+
+  SvcGCMarker(reason_type reason ) {
+    VM_GC_Operation::notify_gc_begin(reason == FULL);
   }
-  ~DTraceGCProbeMarker() {
+
+  ~SvcGCMarker() {
     VM_GC_Operation::notify_gc_end();
   }
 };
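
One subtlety worth noting in the class above: because _jgcm is a data member, C++ construction order guarantees the JVMTI GarbageCollectionStart event is posted before the constructor body fires the DTrace gc__begin probe, and destruction order guarantees GarbageCollectionFinish is posted after gc__end. The reason_type only feeds the DTrace "full" flag; JVMTI itself no longer distinguishes full from minor pauses, which matches the jvmti.xml wording change below.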

@@ -13048,8 +13048,8 @@ myInit() {
   <event label="Garbage Collection Start"
          id="GarbageCollectionStart" const="JVMTI_EVENT_GARBAGE_COLLECTION_START" num="81">
     <description>
-      A Garbage Collection Start event is sent when a full cycle
-      garbage collection begins.
+      A Garbage Collection Start event is sent when a
+      garbage collection pause begins.
       Only stop-the-world collections are reported--that is, collections during
       which all threads cease to modify the state of the Java virtual machine.
       This means that some collectors will never generate these events.
@@ -13075,8 +13075,8 @@ myInit() {
   <event label="Garbage Collection Finish"
          id="GarbageCollectionFinish" const="JVMTI_EVENT_GARBAGE_COLLECTION_FINISH" num="82">
     <description>
-      A Garbage Collection Finish event is sent when a full
-      garbage collection cycle ends.
+      A Garbage Collection Finish event is sent when a
+      garbage collection pause ends.
       This event is sent while the VM is still stopped, thus
       the event handler must not use JNI functions and
       must not use <jvmti/> functions except those which
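
For agent authors, the revised wording means these callbacks now fire for every stop-the-world pause (a young collection, a CMS initial mark or remark), not just full cycles. A minimal sketch of an agent that consumes the events; it assumes only the standard jvmti.h API, and names like gc_pauses are illustrative:

    #include <jvmti.h>
    #include <string.h>

    static volatile int gc_pauses = 0;   // illustrative counter

    // Called with the world stopped: no JNI, and only the few JVMTI
    // functions the spec explicitly allows (e.g. raw monitors).
    static void JNICALL gc_start(jvmtiEnv* jvmti) {
      gc_pauses++;
    }

    static void JNICALL gc_finish(jvmtiEnv* jvmti) {
      // still stopped here; defer any real work to another thread
    }

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) {
        return JNI_ERR;
      }

      jvmtiCapabilities caps;
      memset(&caps, 0, sizeof(caps));
      caps.can_generate_garbage_collection_events = 1;
      jvmti->AddCapabilities(&caps);

      jvmtiEventCallbacks callbacks;
      memset(&callbacks, 0, sizeof(callbacks));
      callbacks.GarbageCollectionStart  = &gc_start;
      callbacks.GarbageCollectionFinish = &gc_finish;
      jvmti->SetEventCallbacks(&callbacks, (jint)sizeof(callbacks));

      jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                      JVMTI_EVENT_GARBAGE_COLLECTION_START, NULL);
      jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                      JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, NULL);
      return JNI_OK;
    }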

@@ -2358,15 +2358,6 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
 }
 #endif // SERVICES_KERNEL

-// CMS has completed referencing processing so may need to update
-// tag maps.
-void JvmtiExport::cms_ref_processing_epilogue() {
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiTagMap::cms_ref_processing_epilogue();
-  }
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////////////

 // Setup current current thread for event collection.
@@ -2536,37 +2527,21 @@ NoJvmtiVMObjectAllocMark::~NoJvmtiVMObjectAllocMark() {
   }
 };

-JvmtiGCMarker::JvmtiGCMarker(bool full) : _full(full), _invocation_count(0) {
-  assert(Thread::current()->is_VM_thread(), "wrong thread");
-
+JvmtiGCMarker::JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
   if (!JvmtiEnv::environments_might_exist()) {
     return;
   }

-  if (ForceFullGCJVMTIEpilogues) {
-    // force 'Full GC' was done semantics for JVMTI GC epilogues
-    _full = true;
-  }
-
-  // GarbageCollectionStart event posted from VM thread - okay because
-  // JVMTI is clear that the "world is stopped" and callback shouldn't
-  // try to call into the VM.
   if (JvmtiExport::should_post_garbage_collection_start()) {
     JvmtiExport::post_garbage_collection_start();
   }

-  // if "full" is false it probably means this is a scavenge of the young
-  // generation. However it could turn out that a "full" GC is required
-  // so we record the number of collections so that it can be checked in
-  // the destructor.
-  if (!_full) {
-    _invocation_count = Universe::heap()->total_full_collections();
-  }
-
-  // Do clean up tasks that need to be done at a safepoint
-  JvmtiEnvBase::check_for_periodic_clean_up();
+  if (SafepointSynchronize::is_at_safepoint()) {
+    // Do clean up tasks that need to be done at a safepoint
+    JvmtiEnvBase::check_for_periodic_clean_up();
+  }
 }

 JvmtiGCMarker::~JvmtiGCMarker() {
   // if there aren't any JVMTI environments then nothing to do
@@ -2578,21 +2553,5 @@ JvmtiGCMarker::~JvmtiGCMarker() {
   if (JvmtiExport::should_post_garbage_collection_finish()) {
     JvmtiExport::post_garbage_collection_finish();
   }
-
-  // we might have initially started out doing a scavenge of the young
-  // generation but could have ended up doing a "full" GC - check the
-  // GC count to see.
-  if (!_full) {
-    _full = (_invocation_count != Universe::heap()->total_full_collections());
-  }
-
-  // Full collection probably means the perm generation has been GC'ed
-  // so we clear the breakpoint cache.
-  if (_full) {
-    JvmtiCurrentBreakpoints::gc_epilogue();
-  }
-
-  // Notify heap/object tagging support
-  JvmtiTagMap::gc_epilogue(_full);
 }
 #endif // JVMTI_KERNEL

@@ -356,9 +356,6 @@ class JvmtiExport : public AllStatic {
   // SetNativeMethodPrefix support
   static char** get_all_native_method_prefixes(int* count_ptr);
-
-  // call after CMS has completed referencing processing
-  static void cms_ref_processing_epilogue() KERNEL_RETURN;
 };

 // Support class used by JvmtiDynamicCodeEventCollector and others. It
@@ -492,55 +489,11 @@ class NoJvmtiVMObjectAllocMark : public StackObj {

 // Base class for reporting GC events to JVMTI.
 class JvmtiGCMarker : public StackObj {
- private:
-  bool _full;                        // marks a "full" GC
-  unsigned int _invocation_count;    // GC invocation count
- protected:
-  JvmtiGCMarker(bool full) KERNEL_RETURN;    // protected
-  ~JvmtiGCMarker() KERNEL_RETURN;            // protected
-};
-
-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a stop-the-world GC for failed allocation.
-//
-// Usage :-
-//
-// void VM_GenCollectForAllocation::doit() {
-//   JvmtiGCForAllocationMarker jgcm;
-//   :
-// }
-//
-// If jvmti is not enabled the constructor and destructor is essentially
-// a no-op (no overhead).
-//
-class JvmtiGCForAllocationMarker : public JvmtiGCMarker {
  public:
-  JvmtiGCForAllocationMarker() : JvmtiGCMarker(false) {
-  }
+  JvmtiGCMarker() KERNEL_RETURN;
+  ~JvmtiGCMarker() KERNEL_RETURN;
 };

-// Support class used to report GC events to JVMTI. The class is stack
-// allocated and should be placed in the doit() implementation of all
-// vm operations that do a "full" stop-the-world GC. This class differs
-// from JvmtiGCForAllocationMarker in that this class assumes that a
-// "full" GC will happen.
-//
-// Usage :-
-//
-// void VM_GenCollectFull::doit() {
-//   JvmtiGCFullMarker jgcm;
-//   :
-// }
-//
-class JvmtiGCFullMarker : public JvmtiGCMarker {
- public:
-  JvmtiGCFullMarker() : JvmtiGCMarker(true) {
-  }
-};
-
 // JvmtiHideSingleStepping is a helper class for hiding
 // internal single step events.
 class JvmtiHideSingleStepping : public StackObj {

@@ -212,14 +212,7 @@ void GrowableCache::oops_do(OopClosure* f) {
   for (int i=0; i<len; i++) {
     GrowableElement *e = _elements->at(i);
     e->oops_do(f);
-  }
-}
-
-void GrowableCache::gc_epilogue() {
-  int len = _elements->length();
-  // recompute the new cache value after GC
-  for (int i=0; i<len; i++) {
-    _cache[i] = _elements->at(i)->getCacheValue();
+    _cache[i] = e->getCacheValue();
   }
 }
@@ -401,10 +394,6 @@ void JvmtiBreakpoints::oops_do(OopClosure* f) {
   _bps.oops_do(f);
 }

-void JvmtiBreakpoints::gc_epilogue() {
-  _bps.gc_epilogue();
-}
-
 void JvmtiBreakpoints::print() {
 #ifndef PRODUCT
   ResourceMark rm;
@@ -534,13 +523,6 @@ void JvmtiCurrentBreakpoints::oops_do(OopClosure* f) {
   }
 }

-void JvmtiCurrentBreakpoints::gc_epilogue() {
-  if (_jvmti_breakpoints != NULL) {
-    _jvmti_breakpoints->gc_epilogue();
-  }
-}
-
 ///////////////////////////////////////////////////////////////
 //
 // class VM_GetOrSetLocal

@@ -117,7 +117,6 @@ public:
   void clear();
   // apply f to every element and update the cache
   void oops_do(OopClosure* f);
-  void gc_epilogue();
 };
@@ -149,7 +148,6 @@ public:
   void remove (int index)       { _cache.remove(index); }
   void clear()                  { _cache.clear(); }
   void oops_do(OopClosure* f)   { _cache.oops_do(f); }
-  void gc_epilogue()            { _cache.gc_epilogue(); }
 };
@@ -278,7 +276,6 @@ public:
   int length();
   void oops_do(OopClosure* f);
-  void gc_epilogue();
   void print();

   int set(JvmtiBreakpoint& bp);
@@ -328,7 +325,6 @@ public:
   static inline bool is_breakpoint(address bcp);

   static void oops_do(OopClosure* f);
-  static void gc_epilogue();
 };

 // quickly test whether the bcp matches a cached breakpoint in the list

@@ -50,7 +50,7 @@
 // JvmtiTagHashmapEntry
 //
-// Each entry encapsulates a JNI weak reference to the tagged object
+// Each entry encapsulates a reference to the tagged object
 // and the tag value. In addition an entry includes a next pointer which
 // is used to chain entries together.
@@ -58,23 +58,24 @@ class JvmtiTagHashmapEntry : public CHeapObj {
  private:
   friend class JvmtiTagMap;

-  jweak _object;                        // JNI weak ref to tagged object
+  oop _object;                          // tagged object
   jlong _tag;                           // the tag
   JvmtiTagHashmapEntry* _next;          // next on the list

-  inline void init(jweak object, jlong tag) {
+  inline void init(oop object, jlong tag) {
     _object = object;
     _tag = tag;
     _next = NULL;
   }

   // constructor
-  JvmtiTagHashmapEntry(jweak object, jlong tag) { init(object, tag); }
+  JvmtiTagHashmapEntry(oop object, jlong tag)   { init(object, tag); }

  public:
   // accessor methods
-  inline jweak object() const   { return _object; }
+  inline oop object() const     { return _object; }
+  inline oop* object_addr()     { return &_object; }
   inline jlong tag() const      { return _tag; }

   inline void set_tag(jlong tag) {
@@ -92,9 +93,7 @@ class JvmtiTagHashmapEntry : public CHeapObj {
 // A hashmap is essentially a table of pointers to entries. Entries
 // are hashed to a location, or position in the table, and then
 // chained from that location. The "key" for hashing is address of
-// the object, or oop. The "value" is the JNI weak reference to the
-// object and the tag value. Keys are not stored with the entry.
-// Instead the weak reference is resolved to obtain the key.
+// the object, or oop. The "value" is the tag value.
 //
 // A hashmap maintains a count of the number entries in the hashmap
 // and resizes if the number of entries exceeds a given threshold.
@@ -206,7 +205,7 @@ class JvmtiTagHashmap : public CHeapObj {
       JvmtiTagHashmapEntry* entry = _table[i];
       while (entry != NULL) {
         JvmtiTagHashmapEntry* next = entry->next();
-        oop key = JNIHandles::resolve(entry->object());
+        oop key = entry->object();
         assert(key != NULL, "jni weak reference cleared!!");
         unsigned int h = hash(key, new_size);
         JvmtiTagHashmapEntry* anchor = new_table[h];
@@ -299,14 +298,12 @@ class JvmtiTagHashmap : public CHeapObj {
     unsigned int h = hash(key);
     JvmtiTagHashmapEntry* entry = _table[h];
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
-        break;
+      if (entry->object() == key) {
+        return entry;
       }
       entry = entry->next();
     }
-    return entry;
+    return NULL;
   }
@@ -343,9 +340,7 @@ class JvmtiTagHashmap : public CHeapObj {
     JvmtiTagHashmapEntry* entry = _table[h];
     JvmtiTagHashmapEntry* prev = NULL;
     while (entry != NULL) {
-      oop orig_key = JNIHandles::resolve(entry->object());
-      assert(orig_key != NULL, "jni weak reference cleared!!");
-      if (key == orig_key) {
+      if (key == entry->object()) {
         break;
       }
       prev = entry;
@@ -418,54 +413,6 @@ void JvmtiTagHashmap::compute_next_trace_threshold() {
   }
 }

-// memory region for young generation
-MemRegion JvmtiTagMap::_young_gen;
-
-// get the memory region used for the young generation
-void JvmtiTagMap::get_young_generation() {
-  CollectedHeap* ch = Universe::heap();
-  switch (ch->kind()) {
-    case (CollectedHeap::GenCollectedHeap): {
-      _young_gen = ((GenCollectedHeap*)ch)->get_gen(0)->reserved();
-      break;
-    }
-#ifndef SERIALGC
-    case (CollectedHeap::ParallelScavengeHeap): {
-      _young_gen = ((ParallelScavengeHeap*)ch)->young_gen()->reserved();
-      break;
-    }
-    case (CollectedHeap::G1CollectedHeap): {
-      // Until a more satisfactory solution is implemented, all
-      // oops in the tag map will require rehash at each gc.
-      // This is a correct, if extremely inefficient solution.
-      // See RFE 6621729 for related commentary.
-      _young_gen = ch->reserved_region();
-      break;
-    }
-#endif  // !SERIALGC
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-// returns true if oop is in the young generation
-inline bool JvmtiTagMap::is_in_young(oop o) {
-  assert(_young_gen.start() != NULL, "checking");
-  void* p = (void*)o;
-  bool in_young = _young_gen.contains(p);
-  return in_young;
-}
-
-// returns the appropriate hashmap for a given object
-inline JvmtiTagHashmap* JvmtiTagMap::hashmap_for(oop o) {
-  if (is_in_young(o)) {
-    return _hashmap[0];
-  } else {
-    return _hashmap[1];
-  }
-}
-
 // create a JvmtiTagMap
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
@@ -476,13 +423,7 @@ JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   assert(JvmtiThreadState_lock->is_locked(), "sanity check");
   assert(((JvmtiEnvBase *)env)->tag_map() == NULL, "tag map already exists for environment");

-  // create the hashmaps
-  for (int i=0; i<n_hashmaps; i++) {
-    _hashmap[i] = new JvmtiTagHashmap();
-  }
-
-  // get the memory region used by the young generation
-  get_young_generation();
+  _hashmap = new JvmtiTagHashmap();

   // finally add us to the environment
   ((JvmtiEnvBase *)env)->set_tag_map(this);
@@ -496,24 +437,19 @@ JvmtiTagMap::~JvmtiTagMap() {
   // also being destroryed.
   ((JvmtiEnvBase *)_env)->set_tag_map(NULL);

-  // iterate over the hashmaps and destroy each of the entries
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    JvmtiTagHashmapEntry** table = hashmap->table();
-    for (int j=0; j<hashmap->size(); j++) {
-      JvmtiTagHashmapEntry* entry = table[j];
-      while (entry != NULL) {
-        JvmtiTagHashmapEntry* next = entry->next();
-        jweak ref = entry->object();
-        JNIHandles::destroy_weak_global(ref);
-        delete entry;
-        entry = next;
-      }
+  JvmtiTagHashmapEntry** table = _hashmap->table();
+  for (int j = 0; j < _hashmap->size(); j++) {
+    JvmtiTagHashmapEntry* entry = table[j];
+    while (entry != NULL) {
+      JvmtiTagHashmapEntry* next = entry->next();
+      delete entry;
+      entry = next;
     }
-    // finally destroy the hashmap
-    delete hashmap;
   }

+  // finally destroy the hashmap
+  delete _hashmap;
+  _hashmap = NULL;
+
   // remove any entries on the free list
   JvmtiTagHashmapEntry* entry = _free_entries;
@@ -522,12 +458,13 @@ JvmtiTagMap::~JvmtiTagMap() {
     delete entry;
     entry = next;
   }
+  _free_entries = NULL;
 }

 // create a hashmap entry
 // - if there's an entry on the (per-environment) free list then this
 // is returned. Otherwise an new entry is allocated.
-JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(jweak ref, jlong tag) {
+JvmtiTagHashmapEntry* JvmtiTagMap::create_entry(oop ref, jlong tag) {
   assert(Thread::current()->is_VM_thread() || is_locked(), "checking");
   JvmtiTagHashmapEntry* entry;
   if (_free_entries == NULL) {
@@ -573,17 +510,13 @@ JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {

 // iterate over all entries in the tag map.
 void JvmtiTagMap::entry_iterate(JvmtiTagHashmapEntryClosure* closure) {
-  for (int i=0; i<n_hashmaps; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
-    hashmap->entry_iterate(closure);
-  }
+  hashmap()->entry_iterate(closure);
 }

 // returns true if the hashmaps are empty
 bool JvmtiTagMap::is_empty() {
   assert(SafepointSynchronize::is_at_safepoint() || is_locked(), "checking");
-  assert(n_hashmaps == 2, "not implemented");
-  return ((_hashmap[0]->entry_count() == 0) && (_hashmap[1]->entry_count() == 0));
+  return hashmap()->entry_count() == 0;
 }
@@ -591,7 +524,7 @@ bool JvmtiTagMap::is_empty() {
 // not tagged
 //
 static inline jlong tag_for(JvmtiTagMap* tag_map, oop o) {
-  JvmtiTagHashmapEntry* entry = tag_map->hashmap_for(o)->find(o);
+  JvmtiTagHashmapEntry* entry = tag_map->hashmap()->find(o);
   if (entry == NULL) {
     return 0;
   } else {
@@ -655,7 +588,7 @@ class CallbackWrapper : public StackObj {
     // record the context
     _tag_map = tag_map;
-    _hashmap = tag_map->hashmap_for(_o);
+    _hashmap = tag_map->hashmap();
     _entry = _hashmap->find(_o);

     // get object tag
@@ -694,23 +627,18 @@ void inline CallbackWrapper::post_callback_tag_update(oop o,
     if (obj_tag != 0) {
       // callback has tagged the object
       assert(Thread::current()->is_VM_thread(), "must be VMThread");
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-      entry = tag_map()->create_entry(ref, obj_tag);
+      entry = tag_map()->create_entry(o, obj_tag);
       hashmap->add(o, entry);
     }
   } else {
     // object was previously tagged - the callback may have untagged
     // the object or changed the tag value
     if (obj_tag == 0) {
-      jweak ref = entry->object();
-
       JvmtiTagHashmapEntry* entry_removed = hashmap->remove(o);
       assert(entry_removed == entry, "checking");
       tag_map()->destroy_entry(entry);
-
-      JNIHandles::destroy_weak_global(ref);
     } else {
       if (obj_tag != entry->tag()) {
         entry->set_tag(obj_tag);
@@ -760,7 +688,7 @@ class TwoOopCallbackWrapper : public CallbackWrapper {
       // for Classes the klassOop is tagged
       _referrer = klassOop_if_java_lang_Class(referrer);
       // record the context
-      _referrer_hashmap = tag_map->hashmap_for(_referrer);
+      _referrer_hashmap = tag_map->hashmap();
       _referrer_entry = _referrer_hashmap->find(_referrer);

       // get object tag
@@ -796,8 +724,7 @@ class TwoOopCallbackWrapper : public CallbackWrapper {
 //
 // This function is performance critical. If many threads attempt to tag objects
 // around the same time then it's possible that the Mutex associated with the
-// tag map will be a hot lock. Eliminating this lock will not eliminate the issue
-// because creating a JNI weak reference requires acquiring a global lock also.
+// tag map will be a hot lock.
 void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   MutexLocker ml(lock());
@@ -808,22 +735,14 @@ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
   o = klassOop_if_java_lang_Class(o);

   // see if the object is already tagged
-  JvmtiTagHashmap* hashmap = hashmap_for(o);
+  JvmtiTagHashmap* hashmap = _hashmap;
   JvmtiTagHashmapEntry* entry = hashmap->find(o);

   // if the object is not already tagged then we tag it
   if (entry == NULL) {
     if (tag != 0) {
-      HandleMark hm;
-      Handle h(o);
-      jweak ref = JNIHandles::make_weak_global(h);
-
-      // the object may have moved because make_weak_global may
-      // have blocked - thus it is necessary resolve the handle
-      // and re-hash the object.
-      o = h();
-      entry = create_entry(ref, tag);
-      hashmap_for(o)->add(o, entry);
+      entry = create_entry(o, tag);
+      hashmap->add(o, entry);
     } else {
       // no-op
     }
@@ -831,13 +750,9 @@ void JvmtiTagMap::set_tag(jobject object, jlong tag) {
     // if the object is already tagged then we either update
     // the tag (if a new tag value has been provided)
     // or remove the object if the new tag value is 0.
-    // Removing the object requires that we also delete the JNI
-    // weak ref to the object.
     if (tag == 0) {
-      jweak ref = entry->object();
       hashmap->remove(o);
       destroy_entry(entry);
-      JNIHandles::destroy_weak_global(ref);
     } else {
       entry->set_tag(tag);
     }
@@ -1626,8 +1541,8 @@ class TagObjectCollector : public JvmtiTagHashmapEntryClosure {
   void do_entry(JvmtiTagHashmapEntry* entry) {
     for (int i=0; i<_tag_count; i++) {
       if (_tags[i] == entry->tag()) {
-        oop o = JNIHandles::resolve(entry->object());
-        assert(o != NULL && o != JNIHandles::deleted_handle(), "sanity check");
+        oop o = entry->object();
+        assert(o != NULL, "sanity check");

         // the mirror is tagged
         if (o->is_klass()) {
@@ -3374,62 +3289,21 @@ void JvmtiTagMap::follow_references(jint heap_filter,
 }

-// called post-GC
-// - for each JVMTI environment with an object tag map, call its rehash
-// function to re-sync with the new object locations.
-void JvmtiTagMap::gc_epilogue(bool full) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+void JvmtiTagMap::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "must be executed at a safepoint");
   if (JvmtiEnv::environments_might_exist()) {
-    // re-obtain the memory region for the young generation (might
-    // changed due to adaptive resizing policy)
-    get_young_generation();
-
     JvmtiEnvIterator it;
     for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
       JvmtiTagMap* tag_map = env->tag_map();
       if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t(full ? "JVMTI Full Rehash " : "JVMTI Rehash ", TraceJVMTIObjectTagging);
-        if (full) {
-          tag_map->rehash(0, n_hashmaps);
-        } else {
-          tag_map->rehash(0, 0);    // tag map for young gen only
-        }
+        tag_map->do_weak_oops(is_alive, f);
       }
     }
   }
 }

-// CMS has completed referencing processing so we may have JNI weak refs
-// to objects in the CMS generation that have been GC'ed.
-void JvmtiTagMap::cms_ref_processing_epilogue() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  assert(UseConcMarkSweepGC, "should only be used with CMS");
-  if (JvmtiEnv::environments_might_exist()) {
-    JvmtiEnvIterator it;
-    for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
-      JvmtiTagMap* tag_map = ((JvmtiEnvBase *)env)->tag_map();
-      if (tag_map != NULL && !tag_map->is_empty()) {
-        TraceTime t("JVMTI Rehash (CMS) ", TraceJVMTIObjectTagging);
-        tag_map->rehash(1, n_hashmaps);    // assume CMS not used in young gen
-      }
-    }
-  }
-}
-
-// For each entry in the hashmaps 'start' to 'end' :
-//
-// 1. resolve the JNI weak reference
-//
-// 2. If it resolves to NULL it means the object has been freed so the entry
-// is removed, the weak reference destroyed, and the object free event is
-// posted (if enabled).
-//
-// 3. If the weak reference resolves to an object then we re-hash the object
-// to see if it has moved or has been promoted (from the young to the old
-// generation for example).
-//
-void JvmtiTagMap::rehash(int start, int end) {
+void JvmtiTagMap::do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f) {

   // does this environment have the OBJECT_FREE event enabled
   bool post_object_free = env()->is_enabled(JVMTI_EVENT_OBJECT_FREE);
@@ -3437,32 +3311,15 @@ void JvmtiTagMap::rehash(int start, int end) {
   // counters used for trace message
   int freed = 0;
   int moved = 0;
-  int promoted = 0;

-  // we assume there are two hashmaps - one for the young generation
-  // and the other for all other spaces.
-  assert(n_hashmaps == 2, "not implemented");
-  JvmtiTagHashmap* young_hashmap = _hashmap[0];
-  JvmtiTagHashmap* other_hashmap = _hashmap[1];
+  JvmtiTagHashmap* hashmap = this->hashmap();

   // reenable sizing (if disabled)
-  young_hashmap->set_resizing_enabled(true);
-  other_hashmap->set_resizing_enabled(true);
-
-  // when re-hashing the hashmap corresponding to the young generation we
-  // collect the entries corresponding to objects that have been promoted.
-  JvmtiTagHashmapEntry* promoted_entries = NULL;
-
-  if (end >= n_hashmaps) {
-    end = n_hashmaps - 1;
-  }
-
-  for (int i=start; i <= end; i++) {
-    JvmtiTagHashmap* hashmap = _hashmap[i];
+  hashmap->set_resizing_enabled(true);

   // if the hashmap is empty then we can skip it
   if (hashmap->_entry_count == 0) {
-    continue;
+    return;
   }

   // now iterate through each entry in the table
@@ -3470,18 +3327,19 @@ void JvmtiTagMap::rehash(int start, int end) {
   JvmtiTagHashmapEntry** table = hashmap->table();
   int size = hashmap->size();

-  for (int pos=0; pos<size; pos++) {
+  JvmtiTagHashmapEntry* delayed_add = NULL;
+
+  for (int pos = 0; pos < size; ++pos) {
     JvmtiTagHashmapEntry* entry = table[pos];
     JvmtiTagHashmapEntry* prev = NULL;
     while (entry != NULL) {
       JvmtiTagHashmapEntry* next = entry->next();

-      jweak ref = entry->object();
-      oop oop = JNIHandles::resolve(ref);
+      oop* obj = entry->object_addr();

       // has object been GC'ed
-      if (oop == NULL) {
+      if (!is_alive->do_object_b(entry->object())) {
         // grab the tag
         jlong tag = entry->tag();
         guarantee(tag != 0, "checking");
@@ -3491,89 +3349,60 @@ void JvmtiTagMap::rehash(int start, int end) {
         hashmap->remove(prev, pos, entry);
         destroy_entry(entry);

-        // destroy the weak ref
-        JNIHandles::destroy_weak_global(ref);
-
         // post the event to the profiler
         if (post_object_free) {
           JvmtiExport::post_object_free(env(), tag);
         }

-        freed++;
-        entry = next;
-        continue;
-      }
-
-      // if this is the young hashmap then the object is either promoted
-      // or moved.
-      // if this is the other hashmap then the object is moved.
-
-      bool same_gen;
-      if (i == 0) {
-        assert(hashmap == young_hashmap, "checking");
-        same_gen = is_in_young(oop);
+        ++freed;
       } else {
-        same_gen = true;
-      }
-
-      if (same_gen) {
+        f->do_oop(entry->object_addr());
+        oop new_oop = entry->object();

         // if the object has moved then re-hash it and move its
         // entry to its new location.
-        unsigned int new_pos = JvmtiTagHashmap::hash(oop, size);
+        unsigned int new_pos = JvmtiTagHashmap::hash(new_oop, size);
         if (new_pos != (unsigned int)pos) {
           if (prev == NULL) {
             table[pos] = next;
           } else {
             prev->set_next(next);
           }
+          if (new_pos < (unsigned int)pos) {
             entry->set_next(table[new_pos]);
             table[new_pos] = entry;
+          } else {
+            // Delay adding this entry to it's new position as we'd end up
+            // hitting it again during this iteration.
+            entry->set_next(delayed_add);
+            delayed_add = entry;
+          }
           moved++;
         } else {
           // object didn't move
           prev = entry;
         }
-      } else {
-        // object has been promoted so remove the entry from the
-        // young hashmap
-        assert(hashmap == young_hashmap, "checking");
-        hashmap->remove(prev, pos, entry);
-
-        // move the entry to the promoted list
-        entry->set_next(promoted_entries);
-        promoted_entries = entry;
       }

       entry = next;
     }
   }
-  }

-  // add the entries, corresponding to the promoted objects, to the
-  // other hashmap.
-  JvmtiTagHashmapEntry* entry = promoted_entries;
-  while (entry != NULL) {
-    oop o = JNIHandles::resolve(entry->object());
-    assert(hashmap_for(o) == other_hashmap, "checking");
-    JvmtiTagHashmapEntry* next = entry->next();
-    other_hashmap->add(o, entry);
-    entry = next;
-    promoted++;
+  // Re-add all the entries which were kept aside
+  while (delayed_add != NULL) {
+    JvmtiTagHashmapEntry* next = delayed_add->next();
+    unsigned int pos = JvmtiTagHashmap::hash(delayed_add->object(), size);
+    delayed_add->set_next(table[pos]);
+    table[pos] = delayed_add;
+    delayed_add = next;
   }

   // stats
   if (TraceJVMTIObjectTagging) {
-    int total_moves = promoted + moved;
-
-    int post_total = 0;
-    for (int i=0; i<n_hashmaps; i++) {
-      post_total += _hashmap[i]->_entry_count;
-    }
+    int post_total = hashmap->_entry_count;
     int pre_total = post_total + freed;

-    tty->print("(%d->%d, %d freed, %d promoted, %d total moves)",
-        pre_total, post_total, freed, promoted, total_moves);
+    tty->print_cr("(%d->%d, %d freed, %d total moves)",
+                  pre_total, post_total, freed, moved);
   }
 }
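
The delayed_add list in do_weak_oops above guards against a classic in-place rehash hazard: a surviving entry whose new bucket lies ahead of the scan position would be visited twice if re-inserted immediately. A self-contained sketch of the same technique, using hypothetical Node and hash_of names rather than the HotSpot types:

    // Minimal sketch of in-place rehash with a delayed re-insertion list.
    // Node and hash_of are illustrative; the logic mirrors do_weak_oops above.
    struct Node { void* key; Node* next; };

    void rehash_in_place(Node** table, int size,
                         unsigned int (*hash_of)(void*, int)) {
      Node* delayed = NULL;                        // entries moving "forward"
      for (int pos = 0; pos < size; ++pos) {
        Node* prev = NULL;
        for (Node* n = table[pos]; n != NULL; ) {
          Node* next = n->next;
          unsigned int new_pos = hash_of(n->key, size);
          if (new_pos != (unsigned int)pos) {
            // unlink from the current chain
            if (prev == NULL) table[pos] = next; else prev->next = next;
            if (new_pos < (unsigned int)pos) {
              n->next = table[new_pos];            // bucket already scanned: safe
              table[new_pos] = n;
            } else {
              n->next = delayed;                   // would be scanned again: defer
              delayed = n;
            }
          } else {
            prev = n;                              // entry stays put
          }
          n = next;
        }
      }
      // re-insert everything that was deferred
      while (delayed != NULL) {
        Node* next = delayed->next;
        unsigned int pos = hash_of(delayed->key, size);
        delayed->next = table[pos];
        table[pos] = delayed;
        delayed = next;
      }
    }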

@@ -45,17 +45,12 @@ class JvmtiTagMap : public CHeapObj {
  private:
   enum{
-    n_hashmaps = 2,                    // encapsulates 2 hashmaps
     max_free_entries = 4096            // maximum number of free entries per env
   };

-  // memory region for young generation
-  static MemRegion _young_gen;
-  static void get_young_generation();
-
   JvmtiEnv*             _env;                      // the jvmti environment
   Mutex                 _lock;                     // lock for this tag map
-  JvmtiTagHashmap*      _hashmap[n_hashmaps];      // the hashmaps
+  JvmtiTagHashmap*      _hashmap;                  // the hashmap

   JvmtiTagHashmapEntry* _free_entries;             // free list for this environment
   int _free_entries_count;                         // number of entries on the free list
@@ -67,11 +62,7 @@ class JvmtiTagMap : public CHeapObj {
   inline Mutex* lock()                      { return &_lock; }
   inline JvmtiEnv* env() const              { return _env; }

-  // rehash tags maps for generation start to end
-  void rehash(int start, int end);
-
-  // indicates if the object is in the young generation
-  static bool is_in_young(oop o);
+  void do_weak_oops(BoolObjectClosure* is_alive, OopClosure* f);

   // iterate over all entries in this tag map
   void entry_iterate(JvmtiTagHashmapEntryClosure* closure);
@@ -81,11 +72,10 @@ class JvmtiTagMap : public CHeapObj {
   // indicates if this tag map is locked
   bool is_locked()                          { return lock()->is_locked(); }

-  // return the appropriate hashmap for a given object
-  JvmtiTagHashmap* hashmap_for(oop o);
+  JvmtiTagHashmap* hashmap() { return _hashmap; }

   // create/destroy entries
-  JvmtiTagHashmapEntry* create_entry(jweak ref, jlong tag);
+  JvmtiTagHashmapEntry* create_entry(oop ref, jlong tag);
   void destroy_entry(JvmtiTagHashmapEntry* entry);

   // returns true if the hashmaps are empty
@@ -134,11 +124,8 @@ class JvmtiTagMap : public CHeapObj {
                              jint* count_ptr, jobject** object_result_ptr,
                              jlong** tag_result_ptr);

-  // call post-GC to rehash the tag maps.
-  static void gc_epilogue(bool full);
-
-  // call after referencing processing has completed (CMS)
-  static void cms_ref_processing_epilogue();
+  static void weak_oops_do(
+      BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
 };

 #endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP
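
The KERNEL_RETURN suffix on weak_oops_do is HotSpot's mechanism for compiling serviceability code out of the reduced "kernel" VM build. A paraphrase of the macros.hpp definition from this era (worth verifying against the actual source):

    #ifdef KERNEL
    #define KERNEL_RETURN        {}     // declaration becomes an empty inline stub
    #else
    #define KERNEL_RETURN               // plain declaration; body lives in the .cpp
    #endif

    // So in a kernel build the header above yields an inline no-op,
    //   static void weak_oops_do(BoolObjectClosure*, OopClosure*) {}
    // and jniHandles.cpp can call it unconditionally at no cost.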

@@ -1198,9 +1198,6 @@ class CommandLineFlags {
   product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
-  product(bool, ForceFullGCJVMTIEpilogues, false,                           \
-          "Force 'Full GC' was done semantics for JVMTI GC epilogues")      \
-                                                                            \
   /* This option can change an EMCP method into an obsolete method. */      \
   /* This can affect tests that except specific methods to be EMCP. */      \
   /* This option should be used with caution. */                            \

@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/jvmtiTagMap.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -428,6 +429,12 @@ void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
       break;
     }
   }
+
+  /*
+   * JvmtiTagMap may also contain weak oops. The iteration of it is placed
+   * here so that we don't need to add it to each of the collectors.
+   */
+  JvmtiTagMap::weak_oops_do(is_alive, f);
 }
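
The comment added above is the crux of the commit: rather than a CMS-specific epilogue, the tag map becomes one more weak-oop root processed by whichever collector happens to run. A simplified sketch of the contract the two closures carry; these are illustrative stand-in types, not the HotSpot BoolObjectClosure/OopClosure definitions:

    // Stand-in closure types illustrating the weak_oops_do protocol.
    typedef void* oop_t;   // stand-in for HotSpot's oop

    struct IsAliveClosure {                 // cf. BoolObjectClosure
      // true if the object survived the current collection
      virtual bool do_object_b(oop_t obj) = 0;
    };

    struct OopUpdateClosure {               // cf. OopClosure
      // called on each surviving slot; may rewrite *p if the object moved
      virtual void do_oop(oop_t* p) = 0;
    };

    // The tag map walks its entries with exactly this protocol:
    //   dead  -> remove the entry, post ObjectFree if enabled
    //   alive -> f->do_oop(slot), then re-hash by the (possibly new) address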