8273107: RunThese24H times out with "java.lang.management.ThreadInfo.getLockName()" is null
Reviewed-by: rehn, coleenp, eosterlund
commit a5f5d60f25 (parent 8fbe1724e0)
@@ -63,6 +63,6 @@ void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAPS) {
       }
     }

-    (void)ObjectSynchronizer::deflate_idle_monitors();
+    (void)ObjectSynchronizer::deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr);
   }
 }
@@ -57,6 +57,40 @@
 #include "utilities/events.hpp"
 #include "utilities/preserveException.hpp"

+class CleanupObjectMonitorsHashtable: StackObj {
+ public:
+  bool do_entry(void*& key, ObjectMonitorsHashtable::PtrList*& list) {
+    list->clear();  // clear the LinkListNodes
+    delete list;    // then delete the LinkedList
+    return true;
+  }
+};
+
+ObjectMonitorsHashtable::~ObjectMonitorsHashtable() {
+  CleanupObjectMonitorsHashtable cleanup;
+  _ptrs->unlink(&cleanup);  // cleanup the LinkedLists
+  delete _ptrs;             // then delete the hash table
+}
+
+void ObjectMonitorsHashtable::add_entry(void* key, ObjectMonitor* om) {
+  ObjectMonitorsHashtable::PtrList* list = get_entry(key);
+  if (list == nullptr) {
+    // Create new list and add it to the hash table:
+    list = new (ResourceObj::C_HEAP, mtThread) ObjectMonitorsHashtable::PtrList();
+    add_entry(key, list);
+  }
+  list->add(om);  // Add the ObjectMonitor to the list.
+  _om_count++;
+}
+
+bool ObjectMonitorsHashtable::has_entry(void* key, ObjectMonitor* om) {
+  ObjectMonitorsHashtable::PtrList* list = get_entry(key);
+  if (list == nullptr || list->find(om) == nullptr) {
+    return false;
+  }
+  return true;
+}
+
 void MonitorList::add(ObjectMonitor* m) {
   ObjectMonitor* head;
   do {
@@ -992,11 +1026,18 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList* t_list, Handle h_obj) {

 // Visitors ...

+// Iterate ObjectMonitors where the owner == thread; this does NOT include
+// ObjectMonitors where owner is set to a stack lock address in thread.
+//
+// This version of monitors_iterate() works with the in-use monitor list.
+//
 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
   MonitorList::Iterator iter = _in_use_list.iterator();
   while (iter.has_next()) {
     ObjectMonitor* mid = iter.next();
     if (mid->owner() != thread) {
+      // Not owned by the target thread and intentionally skips when owner
+      // is set to a stack lock address in the target thread.
       continue;
     }
     if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
@@ -1013,6 +1054,31 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
   }
 }

+// This version of monitors_iterate() works with the specified linked list.
+//
+void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure,
+                                          ObjectMonitorsHashtable::PtrList* list,
+                                          JavaThread* thread) {
+  typedef LinkedListIterator<ObjectMonitor*> ObjectMonitorIterator;
+  ObjectMonitorIterator iter(list->head());
+  while (!iter.is_empty()) {
+    ObjectMonitor* mid = *iter.next();
+    // Owner set to a stack lock address in thread should never be seen here:
+    assert(mid->owner() == thread, "must be");
+    if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
+      // Only process with closure if the object is set.
+
+      // monitors_iterate() is only called at a safepoint or when the
+      // target thread is suspended or when the target thread is
+      // operating on itself. The current closures in use today are
+      // only interested in an owned ObjectMonitor and ownership
+      // cannot be dropped under the calling contexts so the
+      // ObjectMonitor cannot be async deflated.
+      closure->do_monitor(mid);
+    }
+  }
+}
+
 static bool monitors_used_above_threshold(MonitorList* list) {
   if (MonitorUsedDeflationThreshold == 0) {  // disabled case is easy
     return false;
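The list-based overload above feeds the same MonitorClosure interface as the in-use-list walk. A minimal sketch of a closure, assuming only the virtual do_monitor(ObjectMonitor*) hook that the call above exercises; the class name and counting logic are illustrative, not part of this change:

// Illustrative only: counts the owned, object-bearing monitors that
// monitors_iterate() passes to do_monitor().
class CountingMonitorClosure : public MonitorClosure {
 private:
  size_t _count;
 public:
  CountingMonitorClosure() : _count(0) {}
  void do_monitor(ObjectMonitor* mid) { _count++; }
  size_t count() const { return _count; }
};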
@@ -1338,8 +1404,17 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,

 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
+//
+// If table != nullptr, we gather owned ObjectMonitors indexed by the
+// owner in the table. Please note that ObjectMonitors where the owner
+// is set to a stack lock address are NOT associated with the JavaThread
+// that holds that stack lock. All of the current consumers of
+// ObjectMonitorsHashtable info only care about JNI locked monitors and
+// those do not have the owner set to a stack lock address.
+//
 size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
-                                                elapsedTimer* timer_p) {
+                                                elapsedTimer* timer_p,
+                                                ObjectMonitorsHashtable* table) {
   MonitorList::Iterator iter = _in_use_list.iterator();
   size_t deflated_count = 0;

@@ -1350,6 +1425,18 @@ size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
     ObjectMonitor* mid = iter.next();
     if (mid->deflate_monitor()) {
       deflated_count++;
+    } else if (table != nullptr) {
+      // The caller is interested in the owned ObjectMonitors. This does
+      // not include when owner is set to a stack lock address in thread.
+      // This also does not capture unowned ObjectMonitors that cannot be
+      // deflated because of a waiter.
+      void* key = mid->owner();
+      // Since deflate_idle_monitors() and deflate_monitor_list() can be
+      // called more than once, we have to make sure the entry has not
+      // already been added.
+      if (key != nullptr && !table->has_entry(key, mid)) {
+        table->add_entry(key, mid);
+      }
     }

     if (current->is_Java_thread()) {
@@ -1374,8 +1461,8 @@ class HandshakeForDeflation : public HandshakeClosure {

 // This function is called by the MonitorDeflationThread to deflate
 // ObjectMonitors. It is also called via do_final_audit_and_print_stats()
-// by the VMThread.
-size_t ObjectSynchronizer::deflate_idle_monitors() {
+// and VM_ThreadDump::doit() by the VMThread.
+size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
   Thread* current = Thread::current();
   if (current->is_Java_thread()) {
     // The async deflation request has been processed.
@@ -1400,7 +1487,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
   }

   // Deflate some idle ObjectMonitors.
-  size_t deflated_count = deflate_monitor_list(current, ls, &timer);
+  size_t deflated_count = deflate_monitor_list(current, ls, &timer, table);
   if (deflated_count > 0 || is_final_audit()) {
     // There are ObjectMonitors that have been deflated or this is the
     // final audit and all the remaining ObjectMonitors have been
@@ -1458,6 +1545,10 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
     }
     ls->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                  in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
+    if (table != nullptr) {
+      ls->print_cr("ObjectMonitorsHashtable: key_count=" SIZE_FORMAT ", om_count=" SIZE_FORMAT,
+                   table->key_count(), table->om_count());
+    }
   }

   OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
@@ -1560,7 +1651,7 @@ void ObjectSynchronizer::do_final_audit_and_print_stats() {
     // Do a deflation in order to reduce the in-use monitor population
     // that is reported by ObjectSynchronizer::log_in_use_monitor_details()
     // which is called by ObjectSynchronizer::audit_and_print_stats().
-    while (ObjectSynchronizer::deflate_idle_monitors() != 0) {
+    while (ObjectSynchronizer::deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr) >= (size_t)MonitorDeflationMax) {
       ; // empty
     }
     // The other audit_and_print_stats() call is done at the Debug
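This loop, and the one added in VM_ThreadDump::doit() further below, rely on the contract documented above: a single call deflates at most MonitorDeflationMax ObjectMonitors, so a pass that returns fewer than that cap means the whole in-use list was examined. A minimal sketch of the idiom, using a hypothetical helper name that is not part of this change:

// Hypothetical helper (not in the change): keep deflating until a pass comes
// back under the per-pass cap, i.e. until the in-use list has been fully scanned.
static void drain_idle_monitors(ObjectMonitorsHashtable* table) {
  while (ObjectSynchronizer::deflate_idle_monitors(table) >= (size_t)MonitorDeflationMax) {
    ; // previous pass hit the MonitorDeflationMax cap, so scan again
  }
}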
@@ -30,11 +30,69 @@
 #include "runtime/basicLock.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/linkedlist.hpp"
+#include "utilities/resourceHash.hpp"

 class LogStream;
 class ObjectMonitor;
 class ThreadsList;

+// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
+// The JavaThread's owner key is either a JavaThread* or a stack lock
+// address in the JavaThread so we use "void*".
+//
+class ObjectMonitorsHashtable {
+ private:
+  static unsigned int ptr_hash(void* const& s1) {
+    // 2654435761 = 2^32 * Phi (golden ratio)
+    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
+  }
+
+ public:
+  typedef LinkedListImpl<ObjectMonitor*,
+                         ResourceObj::C_HEAP, mtThread,
+                         AllocFailStrategy::RETURN_NULL> PtrList;
+
+  // ResourceHashtable SIZE is specified at compile time so we
+  // use 1031 which is the first prime after 1024.
+  typedef ResourceHashtable<void*, PtrList*, 1031, ResourceObj::C_HEAP, mtThread,
+                            &ObjectMonitorsHashtable::ptr_hash> PtrTable;
+ private:
+  PtrTable* _ptrs;
+  size_t _key_count;
+  size_t _om_count;
+
+ public:
+  // ResourceHashtable is passed to various functions and populated in
+  // different places so we allocate it using C_HEAP to make it immune
+  // from any ResourceMarks that happen to be in the code paths.
+  ObjectMonitorsHashtable() : _ptrs(new (ResourceObj::C_HEAP, mtThread) PtrTable()), _key_count(0), _om_count(0) {}
+
+  ~ObjectMonitorsHashtable();
+
+  void add_entry(void* key, ObjectMonitor* om);
+
+  void add_entry(void* key, PtrList* list) {
+    _ptrs->put(key, list);
+    _key_count++;
+  }
+
+  PtrList* get_entry(void* key) {
+    PtrList** listpp = _ptrs->get(key);
+    return (listpp == nullptr) ? nullptr : *listpp;
+  }
+
+  bool has_entry(void* key) {
+    PtrList** listpp = _ptrs->get(key);
+    return listpp != nullptr && *listpp != nullptr;
+  }
+
+  bool has_entry(void* key, ObjectMonitor* om);
+
+  size_t key_count() { return _key_count; }
+  size_t om_count() { return _om_count; }
+};
+
 class MonitorList {
   friend class VMStructs;

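A short usage sketch of the class declared above, assuming an owner key and an ObjectMonitor* obtained elsewhere; the helper names and variables are illustrative, not part of this change:

// Illustrative only: mirrors how deflate_monitor_list() populates the table and
// how ThreadStackTrace::dump_stack_at_safepoint() later looks it up per thread.
static void record_owned_monitor(ObjectMonitorsHashtable* table, void* key, ObjectMonitor* om) {
  if (key != nullptr && !table->has_entry(key, om)) {
    table->add_entry(key, om);  // bumps om_count(); key_count() too for a brand-new key
  }
}

static ObjectMonitorsHashtable::PtrList* owned_monitors(ObjectMonitorsHashtable* table, JavaThread* jt) {
  return table->get_entry(jt);  // nullptr if no monitor was recorded for this owner
}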
@@ -133,21 +191,30 @@ class ObjectSynchronizer : AllStatic {

   // JNI detach support
   static void release_monitors_owned_by_thread(JavaThread* current);

   // Iterate ObjectMonitors where the owner == thread; this does NOT include
   // ObjectMonitors where owner is set to a stack lock address in thread:
+  //
+  // This version of monitors_iterate() works with the in-use monitor list.
   static void monitors_iterate(MonitorClosure* m, JavaThread* thread);
+  // This version of monitors_iterate() works with the specified linked list.
+  static void monitors_iterate(MonitorClosure* closure,
+                               ObjectMonitorsHashtable::PtrList* list,
+                               JavaThread* thread);

   // Initialize the gInflationLocks
   static void initialize();

-  // GC: we current use aggressive monitor deflation policy
+  // GC: we currently use aggressive monitor deflation policy
   // Basically we try to deflate all monitors that are not busy.
-  static size_t deflate_idle_monitors();
+  static size_t deflate_idle_monitors(ObjectMonitorsHashtable* table);

   // Deflate idle monitors:
   static void chk_for_block_req(JavaThread* current, const char* op_name,
                                 const char* cnt_name, size_t cnt, LogStream* ls,
                                 elapsedTimer* timer_p);
-  static size_t deflate_monitor_list(Thread* current, LogStream* ls,
-                                     elapsedTimer* timer_p);
+  static size_t deflate_monitor_list(Thread* current, LogStream* ls, elapsedTimer* timer_p,
+                                     ObjectMonitorsHashtable* table);
   static size_t in_use_list_ceiling();
   static void dec_in_use_list_ceiling();
   static void inc_in_use_list_ceiling();

@@ -279,6 +279,18 @@ void VM_ThreadDump::doit() {
     concurrent_locks.dump_at_safepoint();
   }

+  ObjectMonitorsHashtable table;
+  ObjectMonitorsHashtable* tablep = nullptr;
+  if (_with_locked_monitors) {
+    // The caller wants locked monitor information and that's expensive to gather
+    // when there are a lot of inflated monitors. So we deflate idle monitors and
+    // gather information about owned monitors at the same time.
+    tablep = &table;
+    while (ObjectSynchronizer::deflate_idle_monitors(tablep) >= (size_t)MonitorDeflationMax) {
+      ; /* empty */
+    }
+  }
+
   if (_num_threads == 0) {
     // Snapshot all live threads

@@ -293,7 +305,7 @@ void VM_ThreadDump::doit() {
       if (_with_locked_synchronizers) {
         tcl = concurrent_locks.thread_concurrent_locks(jt);
       }
-      snapshot_thread(jt, tcl);
+      snapshot_thread(jt, tcl, tablep);
     }
   } else {
     // Snapshot threads in the given _threads array
@@ -328,14 +340,15 @@ void VM_ThreadDump::doit() {
       if (_with_locked_synchronizers) {
         tcl = concurrent_locks.thread_concurrent_locks(jt);
      }
-      snapshot_thread(jt, tcl);
+      snapshot_thread(jt, tcl, tablep);
     }
   }
 }

-void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl) {
+void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
+                                    ObjectMonitorsHashtable* table) {
   ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
-  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors);
+  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, table);
   snapshot->set_concurrent_locks(tcl);
 }

@@ -207,7 +207,8 @@ class VM_ThreadDump : public VM_Operation {
   bool _with_locked_monitors;
   bool _with_locked_synchronizers;

-  void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);
+  void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
+                       ObjectMonitorsHashtable* table);

  public:
   VM_ThreadDump(ThreadDumpResult* result,
@@ -2388,7 +2388,7 @@ void VM_HeapDumper::dump_stack_traces() {
       HandleMark hm(current_thread);

       ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
-      stack_trace->dump_stack_at_safepoint(-1);
+      stack_trace->dump_stack_at_safepoint(-1, /* ObjectMonitorsHashtable is not needed here */ nullptr);
       _stack_traces[_num_threads++] = stack_trace;

       // write HPROF_FRAME records for this thread's stack trace
@@ -659,7 +659,7 @@ ThreadStackTrace::~ThreadStackTrace() {
   }
 }

-void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
+void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table) {
   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

   if (_thread->has_last_Java_frame()) {
@@ -683,9 +683,19 @@ void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {

   if (_with_locked_monitors) {
     // Iterate inflated monitors and find monitors locked by this thread
-    // not found in the stack
+    // that are not found in the stack, e.g. JNI locked monitors:
     InflatedMonitorsClosure imc(this);
-    ObjectSynchronizer::monitors_iterate(&imc, _thread);
+    if (table != nullptr) {
+      // Get the ObjectMonitors locked by the target thread, if any,
+      // and does not include any where owner is set to a stack lock
+      // address in the target thread:
+      ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread);
+      if (list != nullptr) {
+        ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
+      }
+    } else {
+      ObjectSynchronizer::monitors_iterate(&imc, _thread);
+    }
   }
 }

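As the comment notes, a monitor entered through JNI appears in no Java stack frame, so it can only be reported through the owned-monitor iteration above. A minimal sketch of that situation; the native method name is made up and only illustrates the scenario:

#include <jni.h>

// Illustrative only: while this native method holds 'lock', no Java frame owns it,
// so a thread dump that wants locked-monitor info must find the ObjectMonitor via
// the owned-monitor iteration (the table/list path above), not via the stack walk.
extern "C" JNIEXPORT void JNICALL
Java_Example_holdLockBriefly(JNIEnv* env, jclass, jobject lock) {
  if (env->MonitorEnter(lock) != JNI_OK) {
    return;  // failed to enter; nothing to report
  }
  // ... a concurrent thread dump must still report 'lock' as owned here ...
  env->MonitorExit(lock);
}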
@@ -936,9 +946,10 @@ ThreadSnapshot::~ThreadSnapshot() {
   delete _concurrent_locks;
 }

-void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
+void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
+                                             ObjectMonitorsHashtable* table) {
   _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
-  _stack_trace->dump_stack_at_safepoint(max_depth);
+  _stack_trace->dump_stack_at_safepoint(max_depth, table);
 }

@@ -247,7 +247,8 @@ public:
   ThreadStackTrace* get_stack_trace() { return _stack_trace; }
   ThreadConcurrentLocks* get_concurrent_locks() { return _concurrent_locks; }

-  void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors);
+  void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
+                               ObjectMonitorsHashtable* table);
   void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
   void metadata_do(void f(Metadata*));
 };
@@ -270,7 +271,7 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
   int get_stack_depth() { return _depth; }

   void add_stack_frame(javaVFrame* jvf);
-  void dump_stack_at_safepoint(int max_depth);
+  void dump_stack_at_safepoint(int max_depth, ObjectMonitorsHashtable* table);
   Handle allocate_fill_stack_trace_element_array(TRAPS);
   void metadata_do(void f(Metadata*));
   GrowableArray<OopHandle>* jni_locked_monitors() { return _jni_locked_monitors; }