8235795: replace monitor list mux{Acquire,Release}(&gListLock) with spin locks

Reviewed-by: dholmes, coleenp, rehn
Daniel D. Daugherty 2020-02-05 11:40:20 -05:00
parent 8ff24c55ef
commit a7a82b0c79
6 changed files with 749 additions and 310 deletions
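
For context on the change itself: the old code guarded every monitor list with a single global mutex (gListLock); this change lets each list manage its own locking. Below is a minimal standalone sketch of one way a list-head pointer can double as a spin lock by marking its low-order bit, which is the general direction this change takes. It uses std::atomic rather than HotSpot's Atomic wrapper, and every name in it is illustrative rather than a helper from this commit.

    #include <atomic>
    #include <cstdint>

    struct Node { Node* next = nullptr; };

    static std::atomic<Node*> list_head{nullptr};

    // The low-order bit of the head pointer is the "locked" flag.
    static Node* mark(Node* p)      { return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(p) | 1); }
    static bool  is_marked(Node* p) { return (reinterpret_cast<uintptr_t>(p) & 1) != 0; }

    // Spin until the head is observed unmarked and we succeed in marking it.
    // Returns the unmarked head; the caller now owns the list.
    static Node* lock_list() {
      for (;;) {
        Node* head = list_head.load(std::memory_order_acquire);
        if (!is_marked(head) &&
            list_head.compare_exchange_weak(head, mark(head),
                                            std::memory_order_acq_rel)) {
          return head;
        }
        // a short pause/backoff would go here
      }
    }

    // Store a new (unmarked) head, which also releases the lock.
    static void unlock_list(Node* new_head) {
      list_head.store(new_head, std::memory_order_release);
    }

    // Example: prepend a node while holding the list's spin lock.
    static void prepend(Node* n) {
      Node* head = lock_list();
      n->next = head;
      unlock_list(n);
    }

The point of the comparison: the lock state lives in the list head itself, so threads working on different lists no longer serialize on one global mutex.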

@@ -1987,7 +1987,7 @@ void ObjectMonitor::print_debug_style_on(outputStream* st) const {
st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
st->print_cr(" _header = " INTPTR_FORMAT, header().value());
st->print_cr(" _object = " INTPTR_FORMAT, p2i(_object));
st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(_next_om));
st->print_cr(" _next_om = " INTPTR_FORMAT, p2i(next_om()));
st->print_cr(" _pad_buf0 = {");
st->print_cr(" [0] = '\\0'");
st->print_cr(" ...");

@@ -136,20 +136,22 @@ class ObjectMonitor {
// Enforced by the assert() in header_addr().
volatile markWord _header; // displaced object header word - mark
void* volatile _object; // backward object pointer - strong root
public:
ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
private:
// Separate _header and _owner on different cache lines since both can
// have busy multi-threaded access. _header and _object are set at
// initial inflation and _object doesn't change until deflation so
// _object is a good choice to share the cache line with _header.
// _next_om shares _header's cache line for pre-monitor list historical
// reasons. _next_om only changes if the next ObjectMonitor is deflated.
DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE,
sizeof(volatile markWord) + sizeof(void* volatile) +
sizeof(ObjectMonitor *));
sizeof(volatile markWord) + sizeof(void* volatile));
void* volatile _owner; // pointer to owning thread OR BasicLock
volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
// Separate _owner and _next_om on different cache lines since
// both can have busy multi-threaded access. _previous_owner_tid is only
// changed by ObjectMonitor::exit() so it is a good choice to share the
// cache line with _owner.
DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
sizeof(volatile jlong));
ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
volatile intx _recursions; // recursion count, 0 for first entry
ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
// The list is actually composed of WaitNodes,
@@ -249,6 +251,14 @@ class ObjectMonitor {
// _owner field. Returns the prior value of the _owner field.
void* try_set_owner_from(void* old_value, void* new_value);
ObjectMonitor* next_om() const;
// Simply set _next_om field to new_value.
void set_next_om(ObjectMonitor* new_value);
// Try to set _next_om field to new_value if the current value matches
// old_value, using Atomic::cmpxchg(). Otherwise, does not change the
// _next_om field. Returns the prior value of the _next_om field.
ObjectMonitor* try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value);
jint waiters() const;
jint contentions() const;
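
The two padding comments in the objectMonitor.hpp hunk above are about keeping independently contended fields on separate cache lines, so that stores to one field do not keep invalidating the cache line holding the other. A standalone sketch of the same layout idea, using plain C++ and an assumed 64-byte line size rather than HotSpot's DEFINE_PAD_MINUS_SIZE macro and OM_CACHE_LINE_SIZE:

    #include <cstddef>

    constexpr size_t CACHE_LINE = 64;  // assumed; HotSpot derives the real value per platform

    struct TwoHotFields {
      void* volatile _hot_a;                    // e.g. a _header-like field
      char _pad0[CACHE_LINE - sizeof(void*)];   // push the next hot field onto a fresh line
      void* volatile _hot_b;                    // e.g. an _owner-like field
      char _pad1[CACHE_LINE - sizeof(void*)];   // keep whatever follows off _hot_b's line
    };

    static_assert(offsetof(TwoHotFields, _hot_b) == CACHE_LINE,
                  "second hot field starts on its own cache line");

Fields that change rarely (_object, _previous_owner_tid) are deliberately left sharing the hot fields' lines, per the comments above, which keeps the padding cost to roughly one cache line per hot field.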

@@ -137,4 +137,24 @@ inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value)
return prev;
}
// The _next_om field can be concurrently read and modified so we
// use Atomic operations to disable compiler optimizations that
// might try to elide loading and/or storing this field.
inline ObjectMonitor* ObjectMonitor::next_om() const {
return Atomic::load(&_next_om);
}
// Simply set _next_om field to new_value.
inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) {
Atomic::store(&_next_om, new_value);
}
// Try to set _next_om field to new_value if the current value matches
// old_value. Otherwise, does not change the _next_om field. Returns
// the prior value of the _next_om field.
inline ObjectMonitor* ObjectMonitor::try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value) {
return Atomic::cmpxchg(&_next_om, old_value, new_value);
}
#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
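
A hedged usage sketch of the new accessor's semantics (the helper below is hypothetical; the real callers live in the suppressed file diff further down): try_set_next_om() returns the prior value of _next_om, so a caller knows the update took effect exactly when that prior value equals the old_value it passed in.

    // Hypothetical caller, shown only to illustrate try_set_next_om()'s cmpxchg contract.
    static bool try_relink(ObjectMonitor* node,
                           ObjectMonitor* expected_next,
                           ObjectMonitor* new_next) {
      ObjectMonitor* prior = node->try_set_next_om(expected_next, new_next);
      return prior == expected_next;  // true iff _next_om now points at new_next
    }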

File diff suppressed because it is too large

@@ -138,6 +138,7 @@ class ObjectSynchronizer : AllStatic {
// For a given monitor list: global or per-thread, deflate idle monitors
static int deflate_monitor_list(ObjectMonitor** list_p,
int* count_p,
ObjectMonitor** free_head_p,
ObjectMonitor** free_tail_p);
static bool deflate_monitor(ObjectMonitor* mid, oop obj,
@@ -165,7 +166,7 @@ class ObjectSynchronizer : AllStatic {
static void chk_per_thread_free_list_and_count(JavaThread *jt,
outputStream * out,
int *error_cnt_p);
static void log_in_use_monitor_details(outputStream * out, bool on_exit);
static void log_in_use_monitor_details(outputStream * out);
static int log_monitor_list_counts(outputStream * out);
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
@@ -174,14 +175,10 @@
enum { _BLOCKSIZE = 128 };
// global list of blocks of monitors
static PaddedObjectMonitor* volatile g_block_list;
// global monitor free list
static ObjectMonitor* volatile g_free_list;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
static ObjectMonitor* volatile g_om_in_use_list;
// count of entries in g_om_in_use_list
static int g_om_in_use_count;
static PaddedObjectMonitor* g_block_list;
// Function to prepend new blocks to the appropriate lists:
static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);
// Process oops in all global used monitors (i.e. moribund thread's monitors)
static void global_used_oops_do(OopClosure* f);
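
As a point of comparison for prepend_block_to_lists(), here is a minimal standalone sketch of how a new block can be prepended to a shared list head without a global mutex, using a classic compare-and-swap retry loop. std::atomic stands in for HotSpot's Atomic, and the sketch is an assumption about the general shape, not the body of the suppressed file diff above.

    #include <atomic>

    struct Block { Block* _next_block = nullptr; };

    static std::atomic<Block*> g_block_list_like{nullptr};

    // Link the new block against the current head; retry if another thread moved the head.
    static void prepend_block(Block* new_blk) {
      Block* cur = g_block_list_like.load(std::memory_order_acquire);
      do {
        new_blk->_next_block = cur;   // cur is refreshed by a failed CAS below
      } while (!g_block_list_like.compare_exchange_weak(
                   cur, new_blk,
                   std::memory_order_release, std::memory_order_acquire));
    }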

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -526,7 +526,6 @@ class Thread: public ThreadShadow {
os::set_native_thread_name(name);
}
ObjectMonitor** om_in_use_list_addr() { return (ObjectMonitor **)&om_in_use_list; }
Monitor* SR_lock() const { return _SR_lock; }
bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }