8247878: Move Management strong oops to OopStorage

Use OopStorage for the strong oops held by the memory and thread sampling and dumping services, and remove the corresponding oops_do hooks and GC root-scanning calls.

Reviewed-by: dholmes, kbarrett
Coleen Phillimore 2020-07-20 07:50:27 -04:00
parent c7d84850bf
commit 99eccaf6ec
34 changed files with 134 additions and 264 deletions
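The pattern this commit applies across the services code is the same everywhere: a raw oop (or instanceOop) field that each subsystem used to visit in a hand-written oops_do() becomes an OopHandle allocated from the global strong OopStorage, so the GC reaches it through OopStorageSet::strong_oops_do() and the per-subsystem root-scanning hooks can be deleted. Below is a minimal sketch of that pattern, using only the OopHandle/OopStorage calls that appear in the hunks that follow; the class and field names in the sketch are illustrative and not part of the patch.

// Sketch only -- not a file in this commit; ExampleServiceData is made up.
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"

class ExampleServiceData : public CHeapObj<mtServiceability> {
 private:
  OopHandle _obj;   // was: oop _obj; now kept alive by the strong OopStorage

 public:
  ExampleServiceData() : _obj() {}

  void set_obj(oop o) {
    assert(_obj.peek() == NULL, "should be set only once");
    _obj = OopHandle(Universe::vm_global(), o);   // allocate a strong root slot
  }

  oop obj() const { return _obj.resolve(); }      // load through access barriers

  ~ExampleServiceData() {
    _obj.release(Universe::vm_global());          // clear and free the slot
  }

  // No oops_do(OopClosure*) needed: OopStorageSet::strong_oops_do covers it.
};

The GC hunks below simply drop the now-redundant Management root category from each collector, while the SensorInfo, MemoryManager, MemoryPool, ThreadSnapshot, StackFrameInfo and ThreadConcurrentLocks hunks convert each field to this shape and release the handles in new destructors.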

@ -60,7 +60,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("UniverseRoots", "Universe Roots (ms):", max_gc_threads);
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizerRoots", "ObjectSynchronizer Roots (ms):", max_gc_threads);
_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("ManagementRoots", "Management Roots (ms):", max_gc_threads);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);

@ -50,7 +50,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
ThreadRoots,
UniverseRoots,
ObjectSynchronizerRoots,
ManagementRoots,
CLDGRoots,
AOT_ONLY(AOTCodeRoots COMMA)
CMRefRoots,

@ -45,7 +45,6 @@
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutex.hpp"
#include "services/management.hpp"
#include "utilities/macros.hpp"
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
@ -196,13 +195,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
}
}
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_id);
if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
Management::oops_do(strong_roots);
}
}
#if INCLUDE_AOT
if (UseAOT) {
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);

@ -75,7 +75,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
@ -2020,10 +2019,6 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
ObjectSynchronizer::oops_do(&mark_and_push_closure);
break;
case ParallelRootType::management:
Management::oops_do(&mark_and_push_closure);
break;
case ParallelRootType::class_loader_data:
{
CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
@ -2236,7 +2231,6 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
Universe::oops_do(&oop_closure);
Threads::oops_do(&oop_closure, NULL);
ObjectSynchronizer::oops_do(&oop_closure);
Management::oops_do(&oop_closure);
OopStorageSet::strong_oops_do(&oop_closure);
CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::cld_do(&cld_closure);

@ -36,7 +36,6 @@ public:
enum Value {
universe,
object_synchronizer,
management,
class_loader_data,
code_cache,
//"threads" are handled in parallel as a special case

@ -67,7 +67,6 @@
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"
@ -108,10 +107,6 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
}
break;
case ParallelRootType::management:
Management::oops_do(&roots_closure);
break;
case ParallelRootType::code_cache:
{
MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);

@ -67,7 +67,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
@ -829,9 +828,6 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) {
ObjectSynchronizer::oops_do(strong_roots);
}
if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
Management::oops_do(strong_roots);
}
#if INCLUDE_AOT
if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
AOTLoader::oops_do(strong_roots);

@ -107,7 +107,6 @@ protected:
enum GCH_strong_roots_tasks {
GCH_PS_Universe_oops_do,
GCH_PS_ObjectSynchronizer_oops_do,
GCH_PS_Management_oops_do,
GCH_PS_OopStorageSet_oops_do,
GCH_PS_ClassLoaderDataGraph_oops_do,
GCH_PS_CodeCache_oops_do,

@ -41,7 +41,6 @@ class outputStream;
f(CNT_PREFIX ## VMStrongRoots, DESC_PREFIX "VM Strong Roots") \
f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \
f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \
f(CNT_PREFIX ## ManagementRoots, DESC_PREFIX "Management Roots") \
f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \
f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \
f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \

@ -40,7 +40,6 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/thread.hpp"
#include "services/management.hpp"
ShenandoahSerialRoot::ShenandoahSerialRoot(ShenandoahSerialRoot::OopsDo oops_do,
ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
@ -56,14 +55,12 @@ void ShenandoahSerialRoot::oops_do(OopClosure* cl, uint worker_id) {
ShenandoahSerialRoots::ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase) :
_universe_root(&Universe::oops_do, phase, ShenandoahPhaseTimings::UniverseRoots),
_object_synchronizer_root(&ObjectSynchronizer::oops_do, phase, ShenandoahPhaseTimings::ObjectSynchronizerRoots),
_management_root(&Management::oops_do, phase, ShenandoahPhaseTimings::ManagementRoots) {
_object_synchronizer_root(&ObjectSynchronizer::oops_do, phase, ShenandoahPhaseTimings::ObjectSynchronizerRoots) {
}
void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
_universe_root.oops_do(cl, worker_id);
_object_synchronizer_root.oops_do(cl, worker_id);
_management_root.oops_do(cl, worker_id);
}
ShenandoahWeakSerialRoot::ShenandoahWeakSerialRoot(ShenandoahWeakSerialRoot::WeakOopsDo weak_oops_do,

@ -53,7 +53,6 @@ class ShenandoahSerialRoots {
private:
ShenandoahSerialRoot _universe_root;
ShenandoahSerialRoot _object_synchronizer_root;
ShenandoahSerialRoot _management_root;
public:
ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase);
void oops_do(OopClosure* cl, uint worker_id);

@ -39,7 +39,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/thread.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
// Check for overflow of number of root types.
@ -77,7 +76,6 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
if (verify(SerialRoots)) {
shenandoah_assert_safepoint();
Universe::oops_do(oops);
Management::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
}
@ -122,7 +120,6 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
ClassLoaderDataGraph::cld_do(&clds);
Universe::oops_do(oops);
Management::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
Universe::vm_global()->oops_do(oops);
@ -149,7 +146,6 @@ void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
ClassLoaderDataGraph::roots_cld_do(&clds, NULL);
Universe::oops_do(oops);
Management::oops_do(oops);
JNIHandles::oops_do(oops);
ObjectSynchronizer::oops_do(oops);
Universe::vm_global()->oops_do(oops);

@ -49,7 +49,6 @@
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
@ -60,7 +59,6 @@ static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
static const ZStatSubPhase ZSubPhasePauseRootsUniverse("Pause Roots Universe");
static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots ObjectSynchronizer");
static const ZStatSubPhase ZSubPhasePauseRootsManagement("Pause Roots Management");
static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhasePauseRootsVMThread("Pause Roots VM Thread");
static const ZStatSubPhase ZSubPhasePauseRootsJavaThreads("Pause Roots Java Threads");
@ -193,7 +191,6 @@ ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
_java_threads_iter(),
_universe(this),
_object_synchronizer(this),
_management(this),
_jvmti_weak_export(this),
_vm_thread(this),
_java_threads(this),
@ -230,11 +227,6 @@ void ZRootsIterator::do_object_synchronizer(ZRootsIteratorClosure* cl) {
ObjectSynchronizer::oops_do(cl);
}
void ZRootsIterator::do_management(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhasePauseRootsManagement);
Management::oops_do(cl);
}
void ZRootsIterator::do_jvmti_weak_export(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhasePauseRootsJVMTIWeakExport);
AlwaysTrueClosure always_alive;
@ -262,7 +254,6 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhasePauseRoots);
_universe.oops_do(cl);
_object_synchronizer.oops_do(cl);
_management.oops_do(cl);
_vm_thread.oops_do(cl);
_java_threads.oops_do(cl);
if (!ClassUnloading) {

@ -112,7 +112,6 @@ private:
void do_universe(ZRootsIteratorClosure* cl);
void do_object_synchronizer(ZRootsIteratorClosure* cl);
void do_management(ZRootsIteratorClosure* cl);
void do_jvmti_weak_export(ZRootsIteratorClosure* cl);
void do_vm_thread(ZRootsIteratorClosure* cl);
void do_java_threads(ZRootsIteratorClosure* cl);
@ -120,7 +119,6 @@ private:
ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_universe> _universe;
ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_object_synchronizer> _object_synchronizer;
ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_management> _management;
ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_vm_thread> _vm_thread;
ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_java_threads> _java_threads;

@ -75,7 +75,6 @@ void RootSetClosure<Delegate>::process() {
ObjectSynchronizer::oops_do(this);
Universe::oops_do(this);
OopStorageSet::strong_oops_do(this);
Management::oops_do(this);
AOTLoader::oops_do(this);
}

@ -99,7 +99,6 @@ class ReferenceToRootClosure : public StackObj {
bool do_object_synchronizer_roots();
bool do_universe_roots();
bool do_oop_storage_roots();
bool do_management_roots();
bool do_string_table_roots();
bool do_aot_loader_roots();
@ -164,13 +163,6 @@ bool ReferenceToRootClosure::do_oop_storage_roots() {
return false;
}
bool ReferenceToRootClosure::do_management_roots() {
assert(!complete(), "invariant");
ReferenceLocateClosure rlc(_callback, OldObjectRoot::_management, OldObjectRoot::_type_undetermined, NULL);
Management::oops_do(&rlc);
return rlc.complete();
}
bool ReferenceToRootClosure::do_aot_loader_roots() {
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_aot, OldObjectRoot::_type_undetermined, NULL);
@ -203,11 +195,6 @@ bool ReferenceToRootClosure::do_roots() {
return true;
}
if (do_management_roots()) {
_complete = true;
return true;
}
if (do_aot_loader_roots()) {
_complete = true;
return true;

@ -58,8 +58,6 @@ const char* OldObjectRoot::system_description(System system) {
return "Object Monitor";
case _class_loader_data:
return "Class Loader Data";
case _management:
return "Management";
case _code_cache:
return "Code Cache";
case _aot:

@ -38,7 +38,6 @@ class OldObjectRoot : public AllStatic {
_strong_oop_storage_set_last = _strong_oop_storage_set_first + OopStorageSet::strong_count - 1,
_object_synchronizer,
_class_loader_data,
_management,
_code_cache,
_aot,
JVMCI_ONLY(_jvmci COMMA)

@ -25,17 +25,15 @@
#ifndef SHARE_OOPS_OOPHANDLE_HPP
#define SHARE_OOPS_OOPHANDLE_HPP
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/oopsHierarchy.hpp"
class OopStorage;
// Simple class for encapsulating oop pointers stored in metadata.
// These are different from Handle. The Handle class stores pointers
// to oops on the stack, and manages the allocation from a thread local
// area in the constructor.
// This assumes that the caller will allocate the handle in the appropriate
// area. The reason for the encapsulation is to help with naming and to allow
// future uses for read barriers.
// Simple classes for wrapping oop and atomically accessed oop pointers
// stored in OopStorage, or stored in the ClassLoaderData handles area.
// These classes help with allocation, release, and NativeAccess loads and
// stores with the appropriate barriers.
class OopHandle {
friend class VMStructs;
@ -56,4 +54,15 @@ public:
oop* ptr_raw() const { return _obj; }
};
// Convert OopHandle to oop*
template<>
struct PrimitiveConversions::Translate<OopHandle> : public TrueType {
typedef OopHandle Value;
typedef oop* Decayed;
static Decayed decay(Value x) { return x.ptr_raw(); }
static Value recover(Decayed x) { return OopHandle(x); }
};
#endif // SHARE_OOPS_OOPHANDLE_HPP
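The PrimitiveConversions::Translate<OopHandle> specialization added above is what lets Atomic operate directly on OopHandle fields, decaying the handle to its underlying oop* and back. The lazily published management objects rely on this: memoryManager.cpp and memoryPool.cpp (further down) keep their volatile field and acquire/release publication protocol, but the field is now a volatile OopHandle. A sketch of that usage under the same assumptions; the wrapper struct is illustrative, while the member name and calls are taken from the memoryManager.cpp hunk.

// Sketch only -- mirrors the memoryManager.cpp change; the struct is made up.
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/atomic.hpp"

struct MemoryMgrObjSketch {
  volatile OopHandle _memory_mgr_obj;

  oop published() {
    // acquire pairs with the release_store in publish()
    return Atomic::load_acquire(&_memory_mgr_obj).resolve();
  }

  void publish(instanceOop mgr_obj) {
    Atomic::release_store(&_memory_mgr_obj,
                          OopHandle(Universe::vm_global(), mgr_obj));
  }
};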

@ -47,9 +47,11 @@ inline OopHandle::OopHandle(OopStorage* storage, oop obj) :
}
inline void OopHandle::release(OopStorage* storage) {
// Clear the OopHandle first
NativeAccess<>::oop_store(_obj, (oop)NULL);
storage->release(_obj);
if (peek() != NULL) {
// Clear the OopHandle first
NativeAccess<>::oop_store(_obj, (oop)NULL);
storage->release(_obj);
}
}
#endif // SHARE_OOPS_OOPHANDLE_INLINE_HPP
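With the NULL check added above, releasing a never-allocated (empty) OopHandle is a no-op rather than a call to OopStorage::release(NULL). The new destructors in threadService.cpp below depend on that: for example, ThreadSnapshot::_blocker_object is only set on some code paths but is released unconditionally in ~ThreadSnapshot. Illustrative use (some_oop is a placeholder, not from the patch):

// Sketch only -- demonstrates the release() behaviour, not code from the patch.
void oop_handle_release_example(oop some_oop) {
  OopHandle h;                                     // empty handle, no storage slot
  h.release(Universe::vm_global());                // no-op: peek() returns NULL
  h = OopHandle(Universe::vm_global(), some_oop);  // allocate a strong slot
  h.release(Universe::vm_global());                // clears the slot, then frees it
}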

@ -26,7 +26,9 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
@ -162,13 +164,18 @@ void LowMemoryDetector::recompute_enabled_for_collected_pools() {
}
SensorInfo::SensorInfo() {
_sensor_obj = NULL;
_sensor_on = false;
_sensor_count = 0;
_pending_trigger_count = 0;
_pending_clear_count = 0;
}
void SensorInfo::set_sensor(instanceOop sensor) {
assert(_sensor_obj.peek() == NULL, "Should be set only once");
_sensor_obj = OopHandle(Universe::vm_global(), sensor);
}
// When this method is used, the memory usage is monitored
// as a gauge attribute. Sensor notifications (trigger or
// clear) is only emitted at the first time it crosses
@ -277,10 +284,6 @@ void SensorInfo::set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* c
}
}
void SensorInfo::oops_do(OopClosure* f) {
f->do_oop((oop*) &_sensor_obj);
}
void SensorInfo::process_pending_requests(TRAPS) {
int pending_count = pending_trigger_count();
if (pending_clear_count() > 0) {
@ -293,10 +296,9 @@ void SensorInfo::process_pending_requests(TRAPS) {
void SensorInfo::trigger(int count, TRAPS) {
assert(count <= _pending_trigger_count, "just checking");
if (_sensor_obj != NULL) {
Handle sensor_h(THREAD, _sensor_obj.resolve());
if (sensor_h() != NULL) {
InstanceKlass* sensorKlass = Management::sun_management_Sensor_klass(CHECK);
Handle sensor_h(THREAD, _sensor_obj);
Symbol* trigger_method_signature;
JavaValue result(T_VOID);
@ -358,10 +360,9 @@ void SensorInfo::clear(int count, TRAPS) {
_pending_trigger_count = _pending_trigger_count - count;
}
if (_sensor_obj != NULL) {
Handle sensor(THREAD, _sensor_obj.resolve());
if (sensor() != NULL) {
InstanceKlass* sensorKlass = Management::sun_management_Sensor_klass(CHECK);
Handle sensor(THREAD, _sensor_obj);
JavaValue result(T_VOID);
JavaCallArguments args(sensor);
args.push_int((int) count);

@ -26,6 +26,7 @@
#define SHARE_SERVICES_LOWMEMORYDETECTOR_HPP
#include "memory/allocation.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/atomic.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryService.hpp"
@ -117,7 +118,7 @@ class ThresholdSupport : public CHeapObj<mtInternal> {
class SensorInfo : public CHeapObj<mtInternal> {
private:
instanceOop _sensor_obj;
OopHandle _sensor_obj;
bool _sensor_on;
size_t _sensor_count;
@ -141,10 +142,7 @@ private:
void trigger(int count, TRAPS);
public:
SensorInfo();
void set_sensor(instanceOop sensor) {
assert(_sensor_obj == NULL, "Should be set only once");
_sensor_obj = sensor;
}
void set_sensor(instanceOop sensor);
bool has_pending_requests() {
return (_pending_trigger_count > 0 || _pending_clear_count > 0);
@ -205,7 +203,6 @@ public:
void set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold);
void process_pending_requests(TRAPS);
void oops_do(OopClosure* f);
#ifndef PRODUCT
// printing on default output stream;

@ -218,11 +218,6 @@ jlong Management::timestamp() {
return t.ticks() - _stamp.ticks();
}
void Management::oops_do(OopClosure* f) {
MemoryService::oops_do(f);
ThreadService::oops_do(f);
}
InstanceKlass* Management::java_lang_management_ThreadInfo_klass(TRAPS) {
if (_threadInfo_klass == NULL) {
_threadInfo_klass = load_and_initialize_klass(vmSymbols::java_lang_management_ThreadInfo(), CHECK_NULL);
@ -1232,9 +1227,9 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo
for (int depth = 0; depth < num_frames; depth++) {
StackFrameInfo* frame = stacktrace->stack_frame_at(depth);
int len = frame->num_locked_monitors();
GrowableArray<oop>* locked_monitors = frame->locked_monitors();
GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
for (j = 0; j < len; j++) {
oop monitor = locked_monitors->at(j);
oop monitor = locked_monitors->at(j).resolve();
assert(monitor != NULL, "must be a Java object");
monitors_array->obj_at_put(count, monitor);
depths_array->int_at_put(count, depth);
@ -1242,9 +1237,9 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo
}
}
GrowableArray<oop>* jni_locked_monitors = stacktrace->jni_locked_monitors();
GrowableArray<OopHandle>* jni_locked_monitors = stacktrace->jni_locked_monitors();
for (j = 0; j < jni_locked_monitors->length(); j++) {
oop object = jni_locked_monitors->at(j);
oop object = jni_locked_monitors->at(j).resolve();
assert(object != NULL, "must be a Java object");
monitors_array->obj_at_put(count, object);
// Monitor locked via JNI MonitorEnter call doesn't have stack depth info
@ -1258,7 +1253,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo
// Create Object[] filled with locked JSR-166 synchronizers
assert(ts->threadObj() != NULL, "Must be a valid JavaThread");
ThreadConcurrentLocks* tcl = ts->get_concurrent_locks();
GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
int num_locked_synchronizers = (locks != NULL ? locks->length() : 0);
objArrayOop array = oopFactory::new_objArray(SystemDictionary::Object_klass(), num_locked_synchronizers, CHECK_NULL);
@ -1266,7 +1261,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo
synchronizers_array = sh;
for (int k = 0; k < num_locked_synchronizers; k++) {
synchronizers_array->obj_at_put(k, locks->at(k));
synchronizers_array->obj_at_put(k, locks->at(k).resolve());
}
}

@ -63,7 +63,6 @@ public:
static jlong ticks_to_ms(jlong ticks) NOT_MANAGEMENT_RETURN_(0L);
static jlong timestamp() NOT_MANAGEMENT_RETURN_(0L);
static void oops_do(OopClosure* f) NOT_MANAGEMENT_RETURN;
static void* get_jmm_interface(int version);
static void get_optional_support(jmmOptionalSupport* support);

@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
@ -39,7 +40,7 @@
#include "utilities/dtrace.hpp"
MemoryManager::MemoryManager(const char* name) :
_num_pools(0), _name(name), _memory_mgr_obj() {}
_num_pools(0), _name(name) {}
int MemoryManager::add_pool(MemoryPool* pool) {
int index = _num_pools;
@ -53,7 +54,7 @@ int MemoryManager::add_pool(MemoryPool* pool) {
}
bool MemoryManager::is_manager(instanceHandle mh) const {
return mh() == Atomic::load(&_memory_mgr_obj);
return mh() == Atomic::load(&_memory_mgr_obj).resolve();
}
MemoryManager* MemoryManager::get_code_cache_memory_manager() {
@ -67,7 +68,7 @@ MemoryManager* MemoryManager::get_metaspace_memory_manager() {
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_mgr_obj points to or implies.
instanceOop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
oop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj).resolve();
if (mgr_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra manager instances will just be gc'ed.
@ -117,12 +118,9 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Check if another thread has created the management object. We reload
// _memory_mgr_obj here because some other thread may have initialized
// it while we were executing the code before the lock.
//
// The lock has done an acquire, so the load can't float above it, but
// we need to do a load_acquire as above.
mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
mgr_obj = Atomic::load(&_memory_mgr_obj).resolve();
if (mgr_obj != NULL) {
return mgr_obj;
return (instanceOop)mgr_obj;
}
// Get the address of the object we created via call_special.
@ -132,15 +130,11 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// with creating the management object are visible before publishing
// its address. The unlock will publish the store to _memory_mgr_obj
// because it does a release first.
Atomic::release_store(&_memory_mgr_obj, mgr_obj);
Atomic::release_store(&_memory_mgr_obj, OopHandle(Universe::vm_global(), mgr_obj));
}
}
return mgr_obj;
}
void MemoryManager::oops_do(OopClosure* f) {
f->do_oop((oop*) &_memory_mgr_obj);
return (instanceOop)mgr_obj;
}
GCStatInfo::GCStatInfo(int num_pools) {

@ -57,7 +57,7 @@ private:
const char* _name;
protected:
volatile instanceOop _memory_mgr_obj;
volatile OopHandle _memory_mgr_obj;
public:
MemoryManager(const char* name);
@ -77,9 +77,6 @@ public:
const char* name() const { return _name; }
// GC support
void oops_do(OopClosure* f);
// Static factory methods to get a memory manager of a specific type
static MemoryManager* get_code_cache_memory_manager();
static MemoryManager* get_metaspace_memory_manager();

@ -25,7 +25,9 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
@ -62,7 +64,7 @@ MemoryPool::MemoryPool(const char* name,
{}
bool MemoryPool::is_pool(instanceHandle pool) const {
return pool() == Atomic::load(&_memory_pool_obj);
return pool() == Atomic::load(&_memory_pool_obj).resolve();
}
void MemoryPool::add_manager(MemoryManager* mgr) {
@ -74,13 +76,13 @@ void MemoryPool::add_manager(MemoryManager* mgr) {
}
// Returns an instanceHandle of a MemoryPool object.
// Returns an instanceOop of a MemoryPool object.
// It creates a MemoryPool instance when the first time
// this function is called.
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_pool_obj points to or implies.
instanceOop pool_obj = Atomic::load_acquire(&_memory_pool_obj);
oop pool_obj = Atomic::load_acquire(&_memory_pool_obj).resolve();
if (pool_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra pool instances will just be gc'ed.
@ -118,12 +120,9 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Check if another thread has created the pool. We reload
// _memory_pool_obj here because some other thread may have
// initialized it while we were executing the code before the lock.
//
// The lock has done an acquire, so the load can't float above it,
// but we need to do a load_acquire as above.
pool_obj = Atomic::load_acquire(&_memory_pool_obj);
pool_obj = Atomic::load(&_memory_pool_obj).resolve();
if (pool_obj != NULL) {
return pool_obj;
return (instanceOop)pool_obj;
}
// Get the address of the object we created via call_special.
@ -133,11 +132,11 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// with creating the pool are visible before publishing its address.
// The unlock will publish the store to _memory_pool_obj because
// it does a release first.
Atomic::release_store(&_memory_pool_obj, pool_obj);
Atomic::release_store(&_memory_pool_obj, OopHandle(Universe::vm_global(), pool_obj));
}
}
return pool_obj;
return (instanceOop)pool_obj;
}
inline static size_t get_max_value(size_t val1, size_t val2) {
@ -170,16 +169,6 @@ void MemoryPool::set_gc_usage_sensor_obj(instanceHandle sh) {
set_sensor_obj_at(&_gc_usage_sensor, sh);
}
void MemoryPool::oops_do(OopClosure* f) {
f->do_oop((oop*) &_memory_pool_obj);
if (_usage_sensor != NULL) {
_usage_sensor->oops_do(f);
}
if (_gc_usage_sensor != NULL) {
_gc_usage_sensor->oops_do(f);
}
}
CodeHeapPool::CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold) :
MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(),
support_usage_threshold, false), _codeHeap(codeHeap) {

@ -73,7 +73,7 @@ class MemoryPool : public CHeapObj<mtInternal> {
SensorInfo* _usage_sensor;
SensorInfo* _gc_usage_sensor;
volatile instanceOop _memory_pool_obj;
volatile OopHandle _memory_pool_obj;
void add_manager(MemoryManager* mgr);
@ -136,9 +136,6 @@ class MemoryPool : public CHeapObj<mtInternal> {
virtual size_t used_in_bytes() = 0;
virtual bool is_collected_pool() { return false; }
virtual MemoryUsage get_last_collection_usage() { return _after_gc_usage; }
// GC support
void oops_do(OopClosure* f);
};
class CollectedMemoryPool : public MemoryPool {

@ -189,19 +189,6 @@ void MemoryService::gc_end(GCMemoryManager* manager, bool recordPostGCUsage,
countCollection, cause, allMemoryPoolsAffected);
}
void MemoryService::oops_do(OopClosure* f) {
int i;
for (i = 0; i < _pools_list->length(); i++) {
MemoryPool* pool = _pools_list->at(i);
pool->oops_do(f);
}
for (i = 0; i < _managers_list->length(); i++) {
MemoryManager* mgr = _managers_list->at(i);
mgr->oops_do(f);
}
}
bool MemoryService::set_verbose(bool verbose) {
MutexLocker m(Management_lock);
// verbose will be set to the previous value

@ -105,8 +105,6 @@ public:
GCCause::Cause cause,
bool allMemoryPoolsAffected);
static void oops_do(OopClosure* f);
static bool get_verbose() { return log_is_enabled(Info, gc); }
static bool set_verbose(bool verbose);

@ -28,6 +28,7 @@
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@ -261,13 +262,6 @@ bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
return prev;
}
// GC support
void ThreadService::oops_do(OopClosure* f) {
for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
dump->oops_do(f);
}
}
void ThreadService::metadata_do(void f(Metadata*)) {
for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
dump->metadata_do(f);
@ -538,11 +532,6 @@ ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
}
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
// Note: it is very important that the ThreadSnapshot* gets linked before
// ThreadSnapshot::initialize gets called. This is to ensure that
// ThreadSnapshot::oops_do can get called prior to the field
// ThreadSnapshot::_threadObj being assigned a value (to prevent a dangling
// oop).
ThreadSnapshot* ts = new ThreadSnapshot();
link_thread_snapshot(ts);
ts->initialize(t_list(), thread);
@ -561,12 +550,6 @@ void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
_last = ts;
}
void ThreadDumpResult::oops_do(OopClosure* f) {
for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
ts->oops_do(f);
}
}
void ThreadDumpResult::metadata_do(void f(Metadata*)) {
for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
ts->metadata_do(f);
@ -580,31 +563,31 @@ ThreadsList* ThreadDumpResult::t_list() {
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
_method = jvf->method();
_bci = jvf->bci();
_class_holder = _method->method_holder()->klass_holder();
_class_holder = OopHandle(Universe::vm_global(), _method->method_holder()->klass_holder());
_locked_monitors = NULL;
if (with_lock_info) {
ResourceMark rm;
GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
int length = list->length();
if (length > 0) {
_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(length, mtServiceability);
_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(length, mtServiceability);
for (int i = 0; i < length; i++) {
MonitorInfo* monitor = list->at(i);
assert(monitor->owner() != NULL, "This monitor must have an owning object");
_locked_monitors->append(monitor->owner());
_locked_monitors->append(OopHandle(Universe::vm_global(), monitor->owner()));
}
}
}
}
void StackFrameInfo::oops_do(OopClosure* f) {
StackFrameInfo::~StackFrameInfo() {
if (_locked_monitors != NULL) {
int length = _locked_monitors->length();
for (int i = 0; i < length; i++) {
f->do_oop((oop*) _locked_monitors->adr_at(i));
for (int i = 0; i < _locked_monitors->length(); i++) {
_locked_monitors->at(i).release(Universe::vm_global());
}
delete _locked_monitors;
}
f->do_oop(&_class_holder);
_class_holder.release(Universe::vm_global());
}
void StackFrameInfo::metadata_do(void f(Metadata*)) {
@ -616,7 +599,7 @@ void StackFrameInfo::print_on(outputStream* st) const {
java_lang_Throwable::print_stack_element(st, method(), bci());
int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
for (int i = 0; i < len; i++) {
oop o = _locked_monitors->at(i);
oop o = _locked_monitors->at(i).resolve();
st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
}
@ -648,18 +631,25 @@ ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
_depth = 0;
_with_locked_monitors = with_locked_monitors;
if (_with_locked_monitors) {
_jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
_jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
} else {
_jni_locked_monitors = NULL;
}
}
void ThreadStackTrace::add_jni_locked_monitor(oop object) {
_jni_locked_monitors->append(OopHandle(Universe::vm_global(), object));
}
ThreadStackTrace::~ThreadStackTrace() {
for (int i = 0; i < _frames->length(); i++) {
delete _frames->at(i);
}
delete _frames;
if (_jni_locked_monitors != NULL) {
for (int i = 0; i < _jni_locked_monitors->length(); i++) {
_jni_locked_monitors->at(i).release(Universe::vm_global());
}
delete _jni_locked_monitors;
}
}
@ -703,9 +693,9 @@ bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
for (int depth = 0; depth < num_frames; depth++) {
StackFrameInfo* frame = stack_frame_at(depth);
int len = frame->num_locked_monitors();
GrowableArray<oop>* locked_monitors = frame->locked_monitors();
GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
for (int j = 0; j < len; j++) {
oop monitor = locked_monitors->at(j);
oop monitor = locked_monitors->at(j).resolve();
assert(monitor != NULL, "must be a Java object");
if (monitor == object) {
found = true;
@ -738,18 +728,6 @@ void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
_depth++;
}
void ThreadStackTrace::oops_do(OopClosure* f) {
int length = _frames->length();
for (int i = 0; i < length; i++) {
_frames->at(i)->oops_do(f);
}
length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0);
for (int j = 0; j < length; j++) {
f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
}
}
void ThreadStackTrace::metadata_do(void f(Metadata*)) {
int length = _frames->length();
for (int i = 0; i < length; i++) {
@ -832,7 +810,7 @@ ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread*
void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
st->print_cr(" Locked ownable synchronizers:");
ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
GrowableArray<OopHandle>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
if (locks == NULL || locks->is_empty()) {
st->print_cr("\t- None");
st->cr();
@ -840,7 +818,7 @@ void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
}
for (int i = 0; i < locks->length(); i++) {
instanceOop obj = locks->at(i);
oop obj = locks->at(i).resolve();
st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
}
st->cr();
@ -848,23 +826,19 @@ void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
_thread = thread;
_owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, mtServiceability);
_owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<OopHandle>(INITIAL_ARRAY_SIZE, mtServiceability);
_next = NULL;
}
ThreadConcurrentLocks::~ThreadConcurrentLocks() {
for (int i = 0; i < _owned_locks->length(); i++) {
_owned_locks->at(i).release(Universe::vm_global());
}
delete _owned_locks;
}
void ThreadConcurrentLocks::add_lock(instanceOop o) {
_owned_locks->append(o);
}
void ThreadConcurrentLocks::oops_do(OopClosure* f) {
int length = _owned_locks->length();
for (int i = 0; i < length; i++) {
f->do_oop((oop*) _owned_locks->adr_at(i));
}
_owned_locks->append(OopHandle(Universe::vm_global(), o));
}
ThreadStatistics::ThreadStatistics() {
@ -876,9 +850,12 @@ ThreadStatistics::ThreadStatistics() {
memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}
oop ThreadSnapshot::threadObj() const { return _threadObj.resolve(); }
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
_thread = thread;
_threadObj = thread->threadObj();
oop threadObj = thread->threadObj();
_threadObj = OopHandle(Universe::vm_global(), threadObj);
ThreadStatistics* stat = thread->get_thread_stat();
_contended_enter_ticks = stat->contended_enter_ticks();
@ -888,10 +865,13 @@ void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
_sleep_ticks = stat->sleep_ticks();
_sleep_count = stat->sleep_count();
_thread_status = java_lang_Thread::get_thread_status(_threadObj);
_thread_status = java_lang_Thread::get_thread_status(threadObj);
_is_ext_suspended = thread->is_being_ext_suspended();
_is_in_native = (thread->thread_state() == _thread_in_native);
oop blocker_object = NULL;
oop blocker_object_owner = NULL;
if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
_thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
_thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {
@ -901,7 +881,7 @@ void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
// monitor no longer exists; thread is not blocked
_thread_status = java_lang_Thread::RUNNABLE;
} else {
_blocker_object = obj();
blocker_object = obj();
JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
|| (owner != NULL && owner->is_attaching_via_jni())) {
@ -912,23 +892,37 @@ void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
// is not completely initialized. For example thread name and id
// and may not be set, so hide the attaching thread.
_thread_status = java_lang_Thread::RUNNABLE;
_blocker_object = NULL;
blocker_object = NULL;
} else if (owner != NULL) {
_blocker_object_owner = owner->threadObj();
blocker_object_owner = owner->threadObj();
}
}
}
// Support for JSR-166 locks
if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) {
_blocker_object = thread->current_park_blocker();
if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
_blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
blocker_object = thread->current_park_blocker();
if (blocker_object != NULL && blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
}
}
if (blocker_object != NULL) {
_blocker_object = OopHandle(Universe::vm_global(), blocker_object);
}
if (blocker_object_owner != NULL) {
_blocker_object_owner = OopHandle(Universe::vm_global(), blocker_object_owner);
}
}
oop ThreadSnapshot::blocker_object() const { return _blocker_object.resolve(); }
oop ThreadSnapshot::blocker_object_owner() const { return _blocker_object_owner.resolve(); }
ThreadSnapshot::~ThreadSnapshot() {
_blocker_object.release(Universe::vm_global());
_blocker_object_owner.release(Universe::vm_global());
_threadObj.release(Universe::vm_global());
delete _stack_trace;
delete _concurrent_locks;
}
@ -939,18 +933,6 @@ void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_mon
}
void ThreadSnapshot::oops_do(OopClosure* f) {
f->do_oop(&_threadObj);
f->do_oop(&_blocker_object);
f->do_oop(&_blocker_object_owner);
if (_stack_trace != NULL) {
_stack_trace->oops_do(f);
}
if (_concurrent_locks != NULL) {
_concurrent_locks->oops_do(f);
}
}
void ThreadSnapshot::metadata_do(void f(Metadata*)) {
if (_stack_trace != NULL) {
_stack_trace->metadata_do(f);

@ -114,8 +114,6 @@ public:
static DeadlockCycle* find_deadlocks_at_safepoint(ThreadsList * t_list, bool object_monitors_only);
// GC support
static void oops_do(OopClosure* f);
static void metadata_do(void f(Metadata*));
};
@ -195,7 +193,7 @@ private:
// This JavaThread* is protected by being stored in objects that are
// protected by a ThreadsListSetter (ThreadDumpResult).
JavaThread* _thread;
oop _threadObj;
OopHandle _threadObj;
java_lang_Thread::ThreadStatus _thread_status;
bool _is_ext_suspended;
@ -207,8 +205,9 @@ private:
jlong _monitor_wait_count;
jlong _sleep_ticks;
jlong _sleep_count;
oop _blocker_object;
oop _blocker_object_owner;
OopHandle _blocker_object;
OopHandle _blocker_object_owner;
ThreadStackTrace* _stack_trace;
ThreadConcurrentLocks* _concurrent_locks;
@ -217,8 +216,7 @@ private:
// ThreadSnapshot instances should only be created via
// ThreadDumpResult::add_thread_snapshot.
friend class ThreadDumpResult;
ThreadSnapshot() : _thread(NULL), _threadObj(NULL),
_blocker_object(NULL), _blocker_object_owner(NULL),
ThreadSnapshot() : _thread(NULL),
_stack_trace(NULL), _concurrent_locks(NULL), _next(NULL) {};
void initialize(ThreadsList * t_list, JavaThread* thread);
@ -227,7 +225,7 @@ public:
java_lang_Thread::ThreadStatus thread_status() { return _thread_status; }
oop threadObj() const { return _threadObj; }
oop threadObj() const;
void set_next(ThreadSnapshot* n) { _next = n; }
@ -242,8 +240,8 @@ public:
jlong sleep_ticks() { return _sleep_ticks; }
oop blocker_object() { return _blocker_object; }
oop blocker_object_owner() { return _blocker_object_owner; }
oop blocker_object() const;
oop blocker_object_owner() const;
ThreadSnapshot* next() const { return _next; }
ThreadStackTrace* get_stack_trace() { return _stack_trace; }
@ -251,7 +249,6 @@ public:
void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors);
void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
};
@ -261,7 +258,7 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
int _depth; // number of stack frames added
bool _with_locked_monitors;
GrowableArray<StackFrameInfo*>* _frames;
GrowableArray<oop>* _jni_locked_monitors;
GrowableArray<OopHandle>* _jni_locked_monitors;
public:
@ -275,13 +272,12 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
void add_stack_frame(javaVFrame* jvf);
void dump_stack_at_safepoint(int max_depth);
Handle allocate_fill_stack_trace_element_array(TRAPS);
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
GrowableArray<oop>* jni_locked_monitors() { return _jni_locked_monitors; }
GrowableArray<OopHandle>* jni_locked_monitors() { return _jni_locked_monitors; }
int num_jni_locked_monitors() { return (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); }
bool is_owned_monitor_on_stack(oop object);
void add_jni_locked_monitor(oop object) { _jni_locked_monitors->append(object); }
void add_jni_locked_monitor(oop object);
};
// StackFrameInfo for keeping Method* and bci during
@ -291,33 +287,28 @@ class StackFrameInfo : public CHeapObj<mtInternal> {
private:
Method* _method;
int _bci;
GrowableArray<oop>* _locked_monitors; // list of object monitors locked by this frame
GrowableArray<OopHandle>* _locked_monitors; // list of object monitors locked by this frame
// We need to save the mirrors in the backtrace to keep the class
// from being unloaded while we still have this stack trace.
oop _class_holder;
OopHandle _class_holder;
public:
StackFrameInfo(javaVFrame* jvf, bool with_locked_monitors);
~StackFrameInfo() {
if (_locked_monitors != NULL) {
delete _locked_monitors;
}
};
~StackFrameInfo();
Method* method() const { return _method; }
int bci() const { return _bci; }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
int num_locked_monitors() { return (_locked_monitors != NULL ? _locked_monitors->length() : 0); }
GrowableArray<oop>* locked_monitors() { return _locked_monitors; }
GrowableArray<OopHandle>* locked_monitors() { return _locked_monitors; }
void print_on(outputStream* st) const;
};
class ThreadConcurrentLocks : public CHeapObj<mtInternal> {
private:
GrowableArray<instanceOop>* _owned_locks;
GrowableArray<OopHandle>* _owned_locks;
ThreadConcurrentLocks* _next;
// This JavaThread* is protected in one of two different ways
// depending on the usage of the ThreadConcurrentLocks object:
@ -334,8 +325,7 @@ private:
void set_next(ThreadConcurrentLocks* n) { _next = n; }
ThreadConcurrentLocks* next() { return _next; }
JavaThread* java_thread() { return _thread; }
GrowableArray<instanceOop>* owned_locks() { return _owned_locks; }
void oops_do(OopClosure* f);
GrowableArray<OopHandle>* owned_locks() { return _owned_locks; }
};
class ConcurrentLocksDump : public StackObj {
@ -389,7 +379,6 @@ class ThreadDumpResult : public StackObj {
void set_t_list() { _setter.set(); }
ThreadsList* t_list();
bool t_list_has_been_set() { return _setter.is_set(); }
void oops_do(OopClosure* f);
void metadata_do(void f(Metadata*));
};

@ -123,7 +123,6 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Thread Roots", Level.TRACE),
new LogMessageWithLevel("Universe Roots", Level.TRACE),
new LogMessageWithLevel("ObjectSynchronizer Roots", Level.TRACE),
new LogMessageWithLevel("Management Roots", Level.TRACE),
new LogMessageWithLevel("CLDG Roots", Level.TRACE),
new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
new LogMessageWithLevel("JNI Global Roots", Level.TRACE),

@ -93,7 +93,6 @@ public class TestG1ParallelPhases {
"ThreadRoots",
"UniverseRoots",
"ObjectSynchronizerRoots",
"ManagementRoots",
"VM Global",
"JNI Global",
"CLDGRoots",