8318757: VM_ThreadDump asserts in interleaved ObjectMonitor::deflate_monitor calls
Reviewed-by: shade, aboldtch, pchilanomate, dcubed
commit 87be6b69fe
parent 9faead1469
@@ -990,7 +990,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_th

   // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
   JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
-  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
+  ObjectSynchronizer::owned_monitors_iterate(&jmc, java_thread);
   err = jmc.error();

   return err;
@@ -1017,7 +1017,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread* calling_thread, JavaThread* java_th

   // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
   JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
-  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
+  ObjectSynchronizer::owned_monitors_iterate(&jmc, java_thread);
   err = jmc.error();

   return err;
@@ -1849,7 +1849,7 @@ WB_END

 WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb))
   log_info(monitorinflation)("WhiteBox initiated DeflateIdleMonitors");
-  return ObjectSynchronizer::request_deflate_idle_monitors();
+  return ObjectSynchronizer::request_deflate_idle_monitors_from_wb();
 WB_END

 WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
@@ -94,6 +94,6 @@ void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAP
       }
     }

-    (void)ObjectSynchronizer::deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr);
+    (void)ObjectSynchronizer::deflate_idle_monitors();
   }
 }
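For context, the call above sits at the bottom of the deflation thread's wait loop. The following is only a rough sketch of that handshake and is not code from this patch (the real loop computes its wait time differently): the MonitorDeflationThread blocks on MonitorDeflation_lock, is woken by the notify_all() in request_deflate_idle_monitors() shown further down, and then runs the now parameterless deflate_idle_monitors().

    {
      MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
      while (!ObjectSynchronizer::is_async_deflation_needed()) {
        // Wake up periodically even without an explicit request;
        // GuaranteedAsyncDeflationInterval is a real VM flag, used here
        // only to show the shape of the wait.
        ml.wait(GuaranteedAsyncDeflationInterval);
      }
    }
    // Outside the lock, deflate whatever is idle.
    (void)ObjectSynchronizer::deflate_idle_monitors();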
@@ -63,45 +63,6 @@
 #include "utilities/linkedlist.hpp"
 #include "utilities/preserveException.hpp"

-class ObjectMonitorsHashtable::PtrList :
-  public LinkedListImpl<ObjectMonitor*,
-                        AnyObj::C_HEAP, mtThread,
-                        AllocFailStrategy::RETURN_NULL> {};
-
-class CleanupObjectMonitorsHashtable: StackObj {
- public:
-  bool do_entry(void*& key, ObjectMonitorsHashtable::PtrList*& list) {
-    list->clear(); // clear the LinkListNodes
-    delete list;   // then delete the LinkedList
-    return true;
-  }
-};
-
-ObjectMonitorsHashtable::~ObjectMonitorsHashtable() {
-  CleanupObjectMonitorsHashtable cleanup;
-  _ptrs->unlink(&cleanup); // cleanup the LinkedLists
-  delete _ptrs;            // then delete the hash table
-}
-
-void ObjectMonitorsHashtable::add_entry(void* key, ObjectMonitor* om) {
-  ObjectMonitorsHashtable::PtrList* list = get_entry(key);
-  if (list == nullptr) {
-    // Create new list and add it to the hash table:
-    list = new (mtThread) ObjectMonitorsHashtable::PtrList;
-    add_entry(key, list);
-  }
-  list->add(om);  // Add the ObjectMonitor to the list.
-  _om_count++;
-}
-
-bool ObjectMonitorsHashtable::has_entry(void* key, ObjectMonitor* om) {
-  ObjectMonitorsHashtable::PtrList* list = get_entry(key);
-  if (list == nullptr || list->find(om) == nullptr) {
-    return false;
-  }
-  return true;
-}
-
 void MonitorList::add(ObjectMonitor* m) {
   ObjectMonitor* head;
   do {
@@ -1099,57 +1060,40 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob

 // Visitors ...

-// Iterate ObjectMonitors where the owner == thread; this does NOT include
-// ObjectMonitors where owner is set to a stack-lock address in thread.
-//
-// This version of monitors_iterate() works with the in-use monitor list.
-//
-void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
+// Iterate ObjectMonitors owned by any thread and where the owner `filter`
+// returns true.
+template <typename OwnerFilter>
+void ObjectSynchronizer::owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter) {
   MonitorList::Iterator iter = _in_use_list.iterator();
   while (iter.has_next()) {
     ObjectMonitor* mid = iter.next();
-    if (mid->owner() != thread) {
-      // Not owned by the target thread and intentionally skips when owner
-      // is set to a stack-lock address in the target thread.
-      continue;
-    }
-    if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
-      // Only process with closure if the object is set.

-      // monitors_iterate() is only called at a safepoint or when the
-      // target thread is suspended or when the target thread is
-      // operating on itself. The current closures in use today are
-      // only interested in an owned ObjectMonitor and ownership
-      // cannot be dropped under the calling contexts so the
-      // ObjectMonitor cannot be async deflated.
+    // This function is only called at a safepoint or when the
+    // target thread is suspended or when the target thread is
+    // operating on itself. The current closures in use today are
+    // only interested in an owned ObjectMonitor and ownership
+    // cannot be dropped under the calling contexts so the
+    // ObjectMonitor cannot be async deflated.
+    if (mid->has_owner() && filter(mid->owner_raw())) {
+      assert(!mid->is_being_async_deflated(), "Owned monitors should not be deflating");
+      assert(mid->object_peek() != nullptr, "Owned monitors should not have a dead object");
+
       closure->do_monitor(mid);
     }
   }
 }

-// This version of monitors_iterate() works with the specified linked list.
-//
-void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure,
-                                          ObjectMonitorsHashtable::PtrList* list,
-                                          JavaThread* thread) {
-  typedef LinkedListIterator<ObjectMonitor*> ObjectMonitorIterator;
-  ObjectMonitorIterator iter(list->head());
-  while (!iter.is_empty()) {
-    ObjectMonitor* mid = *iter.next();
-    // Owner set to a stack-lock address in thread should never be seen here:
-    assert(mid->owner() == thread, "must be");
-    if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
-      // Only process with closure if the object is set.
-      // monitors_iterate() is only called at a safepoint or when the
-      // target thread is suspended or when the target thread is
-      // operating on itself. The current closures in use today are
-      // only interested in an owned ObjectMonitor and ownership
-      // cannot be dropped under the calling contexts so the
-      // ObjectMonitor cannot be async deflated.
-      closure->do_monitor(mid);
-    }
-  }
-}
+// Iterate ObjectMonitors where the owner == thread; this does NOT include
+// ObjectMonitors where owner is set to a stack-lock address in thread.
+void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure, JavaThread* thread) {
+  auto thread_filter = [&](void* owner) { return owner == thread; };
+  return owned_monitors_iterate_filtered(closure, thread_filter);
+}
+
+// Iterate ObjectMonitors owned by any thread.
+void ObjectSynchronizer::owned_monitors_iterate(MonitorClosure* closure) {
+  auto all_filter = [&](void* owner) { return true; };
+  return owned_monitors_iterate_filtered(closure, all_filter);
+}

 static bool monitors_used_above_threshold(MonitorList* list) {
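To show how the new entry points are meant to be consumed, here is a minimal sketch of a caller; the closure class name and the counting logic are made up for this example, but MonitorClosure::do_monitor and owned_monitors_iterate are the real API shown in the hunk above.

    // Hypothetical example, not part of the patch.
    class CountOwnedMonitorsClosure : public MonitorClosure {
     public:
      size_t _count = 0;
      void do_monitor(ObjectMonitor* mid) override { _count++; }
    };

    // Count the inflated monitors owned by a given JavaThread:
    //   CountOwnedMonitorsClosure cl;
    //   ObjectSynchronizer::owned_monitors_iterate(&cl, target_thread);
    //   // cl._count now holds the number of owned ObjectMonitors.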
@@ -1256,16 +1200,20 @@ bool ObjectSynchronizer::is_async_deflation_needed() {
   return false;
 }

-bool ObjectSynchronizer::request_deflate_idle_monitors() {
+void ObjectSynchronizer::request_deflate_idle_monitors() {
+  MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
+  set_is_async_deflation_requested(true);
+  ml.notify_all();
+}
+
+bool ObjectSynchronizer::request_deflate_idle_monitors_from_wb() {
   JavaThread* current = JavaThread::current();
   bool ret_code = false;

   jlong last_time = last_async_deflation_time_ns();
-  set_is_async_deflation_requested(true);
-  {
-    MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
-    ml.notify_all();
-  }
+
+  request_deflate_idle_monitors();
+
   const int N_CHECKS = 5;
   for (int i = 0; i < N_CHECKS; i++) { // sleep for at most 5 seconds
     if (last_async_deflation_time_ns() > last_time) {
@@ -1582,16 +1530,8 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n
 // Walk the in-use list and deflate (at most MonitorDeflationMax) idle
 // ObjectMonitors. Returns the number of deflated ObjectMonitors.
-//
-// If table != nullptr, we gather owned ObjectMonitors indexed by the
-// owner in the table. Please note that ObjectMonitors where the owner
-// is set to a stack-lock address are NOT associated with the JavaThread
-// that holds that stack-lock. All of the current consumers of
-// ObjectMonitorsHashtable info only care about JNI locked monitors and
-// those do not have the owner set to a stack-lock address.
-//
 size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
-                                                elapsedTimer* timer_p,
-                                                ObjectMonitorsHashtable* table) {
+                                                elapsedTimer* timer_p) {
   MonitorList::Iterator iter = _in_use_list.iterator();
   size_t deflated_count = 0;

@@ -1602,18 +1542,6 @@ size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
     ObjectMonitor* mid = iter.next();
     if (mid->deflate_monitor()) {
       deflated_count++;
-    } else if (table != nullptr) {
-      // The caller is interested in the owned ObjectMonitors. This does
-      // not include when owner is set to a stack-lock address in thread.
-      // This also does not capture unowned ObjectMonitors that cannot be
-      // deflated because of a waiter.
-      void* key = mid->owner();
-      // Since deflate_idle_monitors() and deflate_monitor_list() can be
-      // called more than once, we have to make sure the entry has not
-      // already been added.
-      if (key != nullptr && !table->has_entry(key, mid)) {
-        table->add_entry(key, mid);
-      }
     }

     if (current->is_Java_thread()) {
@@ -1657,9 +1585,8 @@ static size_t delete_monitors(GrowableArray<ObjectMonitor*>* delete_list) {
 }

 // This function is called by the MonitorDeflationThread to deflate
-// ObjectMonitors. It is also called via do_final_audit_and_print_stats()
-// and VM_ThreadDump::doit() by the VMThread.
-size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) {
+// ObjectMonitors. It is also called via do_final_audit_and_print_stats().
+size_t ObjectSynchronizer::deflate_idle_monitors() {
   Thread* current = Thread::current();
   if (current->is_Java_thread()) {
     // The async deflation request has been processed.
@@ -1684,7 +1611,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
   }

   // Deflate some idle ObjectMonitors.
-  size_t deflated_count = deflate_monitor_list(current, ls, &timer, table);
+  size_t deflated_count = deflate_monitor_list(current, ls, &timer);
   size_t unlinked_count = 0;
   size_t deleted_count = 0;
   if (deflated_count > 0 || is_final_audit()) {
@@ -1766,10 +1693,6 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
     }
     ls->print_cr("end deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
                  in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
-    if (table != nullptr) {
-      ls->print_cr("ObjectMonitorsHashtable: key_count=" SIZE_FORMAT ", om_count=" SIZE_FORMAT,
-                   table->key_count(), table->om_count());
-    }
   }

   OM_PERFDATA_OP(MonExtant, set_value(_in_use_list.count()));
@@ -1822,7 +1745,7 @@ void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
   assert(current == JavaThread::current(), "must be current Java thread");
   NoSafepointVerifier nsv;
   ReleaseJavaMonitorsClosure rjmc(current);
-  ObjectSynchronizer::monitors_iterate(&rjmc, current);
+  ObjectSynchronizer::owned_monitors_iterate(&rjmc, current);
   assert(!current->has_pending_exception(), "Should not be possible");
   current->clear_pending_exception();
   assert(current->held_monitor_count() == 0, "Should not be possible");
@@ -1879,7 +1802,7 @@ void ObjectSynchronizer::do_final_audit_and_print_stats() {
     // Do deflations in order to reduce the in-use monitor population
     // that is reported by ObjectSynchronizer::log_in_use_monitor_details()
     // which is called by ObjectSynchronizer::audit_and_print_stats().
-    while (deflate_idle_monitors(/* ObjectMonitorsHashtable is not needed here */ nullptr) > 0) {
+    while (deflate_idle_monitors() > 0) {
       ; // empty
     }
     // The other audit_and_print_stats() call is done at the Debug
@@ -36,55 +36,6 @@ class LogStream;
 class ObjectMonitor;
 class ThreadsList;

-// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
-// The JavaThread's owner key is either a JavaThread* or a stack lock
-// address in the JavaThread so we use "void*".
-//
-class ObjectMonitorsHashtable {
- private:
-  static unsigned int ptr_hash(void* const& s1) {
-    // 2654435761 = 2^32 * Phi (golden ratio)
-    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
-  }
-
- public:
-  class PtrList;
-
- private:
-  // ResourceHashtable SIZE is specified at compile time so we
-  // use 1031 which is the first prime after 1024.
-  typedef ResourceHashtable<void*, PtrList*, 1031, AnyObj::C_HEAP, mtThread,
-                            &ObjectMonitorsHashtable::ptr_hash> PtrTable;
-  PtrTable* _ptrs;
-  size_t _key_count;
-  size_t _om_count;
-
- public:
-  // ResourceHashtable is passed to various functions and populated in
-  // different places so we allocate it using C_HEAP to make it immune
-  // from any ResourceMarks that happen to be in the code paths.
-  ObjectMonitorsHashtable() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
-
-  ~ObjectMonitorsHashtable();
-
-  void add_entry(void* key, ObjectMonitor* om);
-
-  void add_entry(void* key, PtrList* list) {
-    _ptrs->put(key, list);
-    _key_count++;
-  }
-
-  PtrList* get_entry(void* key) {
-    PtrList** listpp = _ptrs->get(key);
-    return (listpp == nullptr) ? nullptr : *listpp;
-  }
-
-  bool has_entry(void* key, ObjectMonitor* om);
-
-  size_t key_count() { return _key_count; }
-  size_t om_count() { return _om_count; }
-};
-
 class MonitorList {
   friend class VMStructs;

@@ -172,29 +123,30 @@ class ObjectSynchronizer : AllStatic {
   // JNI detach support
   static void release_monitors_owned_by_thread(JavaThread* current);

+  // Iterate ObjectMonitors owned by any thread and where the owner `filter`
+  // returns true.
+  template <typename OwnerFilter>
+  static void owned_monitors_iterate_filtered(MonitorClosure* closure, OwnerFilter filter);
+
   // Iterate ObjectMonitors where the owner == thread; this does NOT include
-  // ObjectMonitors where owner is set to a stack lock address in thread:
-  //
-  // This version of monitors_iterate() works with the in-use monitor list.
-  static void monitors_iterate(MonitorClosure* m, JavaThread* thread);
-  // This version of monitors_iterate() works with the specified linked list.
-  static void monitors_iterate(MonitorClosure* closure,
-                               ObjectMonitorsHashtable::PtrList* list,
-                               JavaThread* thread);
+  // ObjectMonitors where owner is set to a stack lock address in thread.
+  static void owned_monitors_iterate(MonitorClosure* m, JavaThread* thread);
+
+  // Iterate ObjectMonitors owned by any thread.
+  static void owned_monitors_iterate(MonitorClosure* closure);

   // Initialize the gInflationLocks
   static void initialize();

-  // GC: we currently use aggressive monitor deflation policy
-  // Basically we try to deflate all monitors that are not busy.
-  static size_t deflate_idle_monitors(ObjectMonitorsHashtable* table);
+  // We currently use aggressive monitor deflation policy;
+  // basically we try to deflate all monitors that are not busy.
+  static size_t deflate_idle_monitors();

   // Deflate idle monitors:
   static void chk_for_block_req(JavaThread* current, const char* op_name,
                                 const char* cnt_name, size_t cnt, LogStream* ls,
                                 elapsedTimer* timer_p);
-  static size_t deflate_monitor_list(Thread* current, LogStream* ls, elapsedTimer* timer_p,
-                                     ObjectMonitorsHashtable* table);
+  static size_t deflate_monitor_list(Thread* current, LogStream* ls, elapsedTimer* timer_p);
   static size_t in_use_list_ceiling();
   static void dec_in_use_list_ceiling();
   static void inc_in_use_list_ceiling();
@@ -204,7 +156,8 @@ class ObjectSynchronizer : AllStatic {
   static bool is_final_audit() { return _is_final_audit; }
   static void set_is_final_audit() { _is_final_audit = true; }
   static jlong last_async_deflation_time_ns() { return _last_async_deflation_time_ns; }
-  static bool request_deflate_idle_monitors(); // for whitebox test support
+  static void request_deflate_idle_monitors();
+  static bool request_deflate_idle_monitors_from_wb(); // for whitebox test support
   static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
   static jlong time_since_last_async_deflation_ms();

@@ -252,4 +205,11 @@ class ObjectLocker : public StackObj {
   void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
 };

+// Interface to visit monitors
+class ObjectMonitorsView {
+ public:
+  // Visit monitors that belong to the given thread
+  virtual void visit(MonitorClosure* closure, JavaThread* thread) = 0;
+};
+
 #endif // SHARE_RUNTIME_SYNCHRONIZER_HPP
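To illustrate the contract of the new interface, here is a minimal sketch of an implementation (not part of this change; the class name is made up) that visits the live in-use list instead of a pre-collected snapshot, by forwarding to the per-thread iterator introduced above:

    // Hypothetical example, not in the patch.
    class LiveObjectMonitorsView : public ObjectMonitorsView {
     public:
      void visit(MonitorClosure* closure, JavaThread* thread) override {
        // Owned monitors of `thread`; stack-locked objects are not included.
        ObjectSynchronizer::owned_monitors_iterate(closure, thread);
      }
    };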
@@ -43,12 +43,14 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaThread.inline.hpp"
 #include "runtime/jniHandles.hpp"
+#include "runtime/objectMonitor.inline.hpp"
 #include "runtime/stackFrameStream.inline.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/threads.hpp"
 #include "runtime/threadSMR.inline.hpp"
 #include "runtime/vmOperations.hpp"
 #include "services/threadService.hpp"
+#include "utilities/ticks.hpp"

 #define VM_OP_NAME_INITIALIZE(name) #name,
@@ -265,6 +267,105 @@ void VM_ThreadDump::doit_epilogue() {
   }
 }

+// Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
+// The JavaThread's owner key is either a JavaThread* or a stack lock
+// address in the JavaThread so we use "void*".
+//
+class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
+ private:
+  static unsigned int ptr_hash(void* const& s1) {
+    // 2654435761 = 2^32 * Phi (golden ratio)
+    return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
+  }
+
+ private:
+  class ObjectMonitorLinkedList :
+    public LinkedListImpl<ObjectMonitor*,
+                          AnyObj::C_HEAP, mtThread,
+                          AllocFailStrategy::RETURN_NULL> {};
+
+  // ResourceHashtable SIZE is specified at compile time so we
+  // use 1031 which is the first prime after 1024.
+  typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
+                            &ObjectMonitorsDump::ptr_hash> PtrTable;
+  PtrTable* _ptrs;
+  size_t _key_count;
+  size_t _om_count;
+
+  void add_list(void* key, ObjectMonitorLinkedList* list) {
+    _ptrs->put(key, list);
+    _key_count++;
+  }
+
+  ObjectMonitorLinkedList* get_list(void* key) {
+    ObjectMonitorLinkedList** listpp = _ptrs->get(key);
+    return (listpp == nullptr) ? nullptr : *listpp;
+  }
+
+  void add(ObjectMonitor* monitor) {
+    void* key = monitor->owner();
+
+    ObjectMonitorLinkedList* list = get_list(key);
+    if (list == nullptr) {
+      // Create new list and add it to the hash table:
+      list = new (mtThread) ObjectMonitorLinkedList;
+      _ptrs->put(key, list);
+      _key_count++;
+    }
+
+    assert(list->find(monitor) == nullptr, "Should not contain duplicates");
+    list->add(monitor);  // Add the ObjectMonitor to the list.
+    _om_count++;
+  }
+
+ public:
+  // ResourceHashtable is passed to various functions and populated in
+  // different places so we allocate it using C_HEAP to make it immune
+  // from any ResourceMarks that happen to be in the code paths.
+  ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
+
+  ~ObjectMonitorsDump() {
+    class CleanupObjectMonitorsDump: StackObj {
+     public:
+      bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
+        list->clear(); // clear the LinkListNodes
+        delete list;   // then delete the LinkedList
+        return true;
+      }
+    } cleanup;
+
+    _ptrs->unlink(&cleanup); // cleanup the LinkedLists
+    delete _ptrs;            // then delete the hash table
+  }
+
+  // Implements MonitorClosure used to collect all owned monitors in the system
+  void do_monitor(ObjectMonitor* monitor) override {
+    assert(monitor->has_owner(), "Expects only owned monitors");
+
+    if (monitor->is_owner_anonymous()) {
+      // There's no need to collect anonymous owned monitors
+      // because the callers of this code is only interested
+      // in JNI owned monitors.
+      return;
+    }
+
+    add(monitor);
+  }
+
+  // Implements the ObjectMonitorsView interface
+  void visit(MonitorClosure* closure, JavaThread* thread) override {
+    ObjectMonitorLinkedList* list = get_list(thread);
+    LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
+    while (!iter.is_empty()) {
+      ObjectMonitor* monitor = *iter.next();
+      closure->do_monitor(monitor);
+    }
+  }
+
+  size_t key_count() { return _key_count; }
+  size_t om_count() { return _om_count; }
+};
+
 void VM_ThreadDump::doit() {
   ResourceMark rm;

@@ -279,16 +380,20 @@ void VM_ThreadDump::doit() {
     concurrent_locks.dump_at_safepoint();
   }

-  ObjectMonitorsHashtable table;
-  ObjectMonitorsHashtable* tablep = nullptr;
+  ObjectMonitorsDump object_monitors;
   if (_with_locked_monitors) {
-    // The caller wants locked monitor information and that's expensive to gather
-    // when there are a lot of inflated monitors. So we deflate idle monitors and
-    // gather information about owned monitors at the same time.
-    tablep = &table;
-    while (ObjectSynchronizer::deflate_idle_monitors(tablep) > 0) {
-      ; /* empty */
-    }
+    // Gather information about owned monitors.
+    ObjectSynchronizer::owned_monitors_iterate(&object_monitors);
+
+    // If there are many object monitors in the system then the above iteration
+    // can start to take time. Be friendly to following thread dumps by telling
+    // the MonitorDeflationThread to deflate monitors.
+    //
+    // This is trying to be somewhat backwards compatible with the previous
+    // implementation, which performed monitor deflation right here. We might
+    // want to reconsider the need to trigger monitor deflation from the thread
+    // dumping and instead maybe tweak the deflation heuristics.
+    ObjectSynchronizer::request_deflate_idle_monitors();
   }

   if (_num_threads == 0) {
@@ -305,7 +410,7 @@ void VM_ThreadDump::doit() {
       if (_with_locked_synchronizers) {
         tcl = concurrent_locks.thread_concurrent_locks(jt);
       }
-      snapshot_thread(jt, tcl, tablep);
+      snapshot_thread(jt, tcl, &object_monitors);
     }
   } else {
     // Snapshot threads in the given _threads array
@@ -340,15 +445,15 @@ void VM_ThreadDump::doit() {
       if (_with_locked_synchronizers) {
         tcl = concurrent_locks.thread_concurrent_locks(jt);
       }
-      snapshot_thread(jt, tcl, tablep);
+      snapshot_thread(jt, tcl, &object_monitors);
     }
   }
 }

 void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
-                                    ObjectMonitorsHashtable* table) {
+                                    ObjectMonitorsView* monitors) {
   ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
-  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, table, false);
+  snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
   snapshot->set_concurrent_locks(tcl);
 }

@@ -30,7 +30,7 @@
 #include "runtime/vmOperation.hpp"
 #include "runtime/threadSMR.hpp"

-class ObjectMonitorsHashtable;
+class ObjectMonitorsView;

 // A hodge podge of commonly used VM Operations

@@ -204,7 +204,7 @@ class VM_ThreadDump : public VM_Operation {
   bool _with_locked_synchronizers;

   void snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
-                       ObjectMonitorsHashtable* table);
+                       ObjectMonitorsView* monitors);

  public:
   VM_ThreadDump(ThreadDumpResult* result,
@@ -45,6 +45,7 @@
 #include "runtime/init.hpp"
 #include "runtime/javaThread.inline.hpp"
 #include "runtime/objectMonitor.inline.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threads.hpp"
 #include "runtime/threadSMR.inline.hpp"
@@ -687,7 +688,7 @@ ThreadStackTrace::~ThreadStackTrace() {
   }
 }

-void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table, bool full) {
+void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsView* monitors, bool full) {
   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

   if (_thread->has_last_Java_frame()) {
@@ -723,17 +724,7 @@ void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHasht
     // Iterate inflated monitors and find monitors locked by this thread
     // that are not found in the stack, e.g. JNI locked monitors:
     InflatedMonitorsClosure imc(this);
-    if (table != nullptr) {
-      // Get the ObjectMonitors locked by the target thread, if any,
-      // and does not include any where owner is set to a stack lock
-      // address in the target thread:
-      ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread);
-      if (list != nullptr) {
-        ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
-      }
-    } else {
-      ObjectSynchronizer::monitors_iterate(&imc, _thread);
-    }
+    monitors->visit(&imc, _thread);
   }
 }

@@ -988,9 +979,9 @@ ThreadSnapshot::~ThreadSnapshot() {
 }

 void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
-                                             ObjectMonitorsHashtable* table, bool full) {
+                                             ObjectMonitorsView* monitors, bool full) {
   _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
-  _stack_trace->dump_stack_at_safepoint(max_depth, table, full);
+  _stack_trace->dump_stack_at_safepoint(max_depth, monitors, full);
 }

@@ -38,7 +38,7 @@
 #include "services/management.hpp"

 class DeadlockCycle;
-class ObjectMonitorsHashtable;
+class ObjectMonitorsView;
 class OopClosure;
 class StackFrameInfo;
 class ThreadConcurrentLocks;
@@ -264,7 +264,7 @@ public:
   ThreadConcurrentLocks* get_concurrent_locks() { return _concurrent_locks; }

   void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors,
-                               ObjectMonitorsHashtable* table, bool full);
+                               ObjectMonitorsView* monitors, bool full);
   void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
   void metadata_do(void f(Metadata*));
 };
@@ -287,7 +287,7 @@ class ThreadStackTrace : public CHeapObj<mtInternal> {
   int get_stack_depth() { return _depth; }

   void add_stack_frame(javaVFrame* jvf);
-  void dump_stack_at_safepoint(int max_depth, ObjectMonitorsHashtable* table, bool full);
+  void dump_stack_at_safepoint(int max_depth, ObjectMonitorsView* monitors, bool full);
   Handle allocate_fill_stack_trace_element_array(TRAPS);
   void metadata_do(void f(Metadata*));
   GrowableArray<OopHandle>* jni_locked_monitors() { return _jni_locked_monitors; }
@@ -388,6 +388,7 @@ tier1_runtime = \
  -runtime/modules/LoadUnloadModuleStress.java \
  -runtime/modules/ModuleStress/ExportModuleStressTest.java \
  -runtime/modules/ModuleStress/ModuleStressGC.java \
+ -runtime/Monitor/ConcurrentDeflation.java \
  -runtime/ReservedStack \
  -runtime/SelectionResolution/AbstractMethodErrorTest.java \
  -runtime/SelectionResolution/IllegalAccessErrorTest.java \
test/hotspot/jtreg/runtime/Monitor/ConcurrentDeflation.java (new file, 81 lines)
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.ProcessTools;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+
+/*
+ * @test
+ * @bug 8318757
+ * @summary Test concurrent monitor deflation by MonitorDeflationThread and thread dumping
+ * @library /test/lib
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:GuaranteedAsyncDeflationInterval=2000 -XX:LockingMode=0 ConcurrentDeflation
+ */
+
+public class ConcurrentDeflation {
+    public static final long TOTAL_RUN_TIME_NS = 10_000_000_000L;
+    public static Object[] monitors = new Object[1000];
+    public static int monitorCount;
+
+    public static void main(String[] args) throws Exception {
+        Thread threadDumper = new Thread(() -> dumpThreads());
+        threadDumper.start();
+        Thread monitorCreator = new Thread(() -> createMonitors());
+        monitorCreator.start();
+
+        threadDumper.join();
+        monitorCreator.join();
+    }
+
+    static private void dumpThreads() {
+        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+        int dumpCount = 0;
+        long startTime = System.nanoTime();
+        while (System.nanoTime() - startTime < TOTAL_RUN_TIME_NS) {
+            threadBean.dumpAllThreads(true, false);
+            dumpCount++;
+            try {
+                Thread.sleep(10);
+            } catch (InterruptedException e) {}
+        }
+        System.out.println("Dumped all thread info " + dumpCount + " times");
+    }
+
+    static private void createMonitors() {
+        int index = 0;
+        long startTime = System.nanoTime();
+        while (System.nanoTime() - startTime < TOTAL_RUN_TIME_NS) {
+            index = index++ % 1000;
+            monitors[index] = new Object();
+            synchronized (monitors[index]) {
+                monitorCount++;
+            }
+        }
+        System.out.println("Created " + monitorCount + " monitors");
+    }
+}