8213150: Add verification for locking by VMThread

Extend verification to all locking, not just VM operations, and fix the CLDG lock so that it is not taken by the VM thread.

Reviewed-by: rehn, dholmes
Coleen Phillimore 2019-09-24 10:12:56 -04:00
parent 11d43732bc
commit e49e9292d3
14 changed files with 148 additions and 125 deletions
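For orientation, here is a minimal sketch of how the two debug-time checks introduced below map onto the two locking entry points. Example_lock and example() are hypothetical names invented for this illustration; the constructor arguments follow the Mutex constructor shown in the mutex.cpp hunks.

    // Hypothetical lock: the VM thread may block on it, and it never checks for safepoints.
    Mutex* Example_lock = new Mutex(Mutex::leaf, "Example_lock",
                                    true /* allow_vm_block */,
                                    Mutex::_safepoint_check_never);

    void example() {
      Thread* self = Thread::current();
      // lock() runs check_safepoint_state(): only legal on _safepoint_check_always locks,
      // and for a JavaThread it also runs check_for_valid_safepoint_state().
      // lock_without_safepoint_check() runs check_no_safepoint_state() instead, which is
      // what a _safepoint_check_never lock like this one requires.
      Example_lock->lock_without_safepoint_check(self);
      // ... critical section ...
      Example_lock->unlock();
    }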


@@ -285,17 +285,27 @@ void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
   }
 }
 
-// Closure for locking and iterating through classes.
-LockedClassesDo::LockedClassesDo(classes_do_func_t f) : _function(f) {
-  ClassLoaderDataGraph_lock->lock();
+// Closure for locking and iterating through classes. Only lock outside of safepoint.
+LockedClassesDo::LockedClassesDo(classes_do_func_t f) : _function(f),
+  _do_lock(!SafepointSynchronize::is_at_safepoint()) {
+  if (_do_lock) {
+    ClassLoaderDataGraph_lock->lock();
+  }
 }
 
-LockedClassesDo::LockedClassesDo() : _function(NULL) {
+LockedClassesDo::LockedClassesDo() : _function(NULL),
+  _do_lock(!SafepointSynchronize::is_at_safepoint()) {
   // callers provide their own do_klass
-  ClassLoaderDataGraph_lock->lock();
+  if (_do_lock) {
+    ClassLoaderDataGraph_lock->lock();
+  }
 }
 
-LockedClassesDo::~LockedClassesDo() { ClassLoaderDataGraph_lock->unlock(); }
+LockedClassesDo::~LockedClassesDo() {
+  if (_do_lock) {
+    ClassLoaderDataGraph_lock->unlock();
+  }
+}
 
 // Iterating over the CLDG needs to be locked because

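The constructors above now decide at construction time whether to take ClassLoaderDataGraph_lock. A generic sketch of that pattern follows; ConditionalMutexLocker is a hypothetical name used only for this illustration (the patch itself open-codes the _do_lock flag inside LockedClassesDo).

    // At a safepoint no JavaThread is running, so the lock (which might be held by a
    // blocked JavaThread) must not be taken; outside a safepoint it must be.
    class ConditionalMutexLocker : public StackObj {
      Mutex* _mutex;
      bool   _do_lock;
     public:
      ConditionalMutexLocker(Mutex* mutex) :
        _mutex(mutex), _do_lock(!SafepointSynchronize::is_at_safepoint()) {
        if (_do_lock) _mutex->lock();
      }
      ~ConditionalMutexLocker() {
        if (_do_lock) _mutex->unlock();
      }
    };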

@@ -156,6 +156,7 @@ class ClassLoaderDataGraph : public AllStatic {
 class LockedClassesDo : public KlassClosure {
   typedef void (*classes_do_func_t)(Klass*);
   classes_do_func_t _function;
+  bool _do_lock;
  public:
   LockedClassesDo();      // For callers who provide their own do_klass
   LockedClassesDo(classes_do_func_t function);


@@ -171,9 +171,8 @@ void MemAllocator::Allocation::check_for_valid_allocation_state() const {
   // This is a VM policy failure, so how do we exhaustively test it?
   assert(!_thread->has_pending_exception(),
          "shouldn't be allocating with pending exception");
-  // Allocation of an oop can always invoke a safepoint,
-  // hence, the true argument.
-  _thread->check_for_valid_safepoint_state(true);
+  // Allocation of an oop can always invoke a safepoint.
+  _thread->check_for_valid_safepoint_state();
 }
 #endif


@@ -556,7 +556,8 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w
   Handle h_loader (thread, loader);
   Klass* k = SystemDictionary::find(name, h_loader, h_prot, thread);
 
-  if (k != NULL) {
+  // Avoid constant pool verification at a safepoint, which takes the Module_lock.
+  if (k != NULL && !SafepointSynchronize::is_at_safepoint()) {
     // Make sure that resolving is legal
     EXCEPTION_MARK;
     // return NULL if verification fails
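A hedged sketch of the guard pattern used above, for code that can run both from a JavaThread and inside a safepoint; the body shown is illustrative, not the actual verification path:

    if (!SafepointSynchronize::is_at_safepoint()) {
      // Outside a safepoint it is safe to block here; Module_lock is no longer
      // an allow_vm_block lock after the mutexLocker.cpp change below.
      MutexLocker ml(Module_lock);
      // ... verification work that may need Module_lock ...
    }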


@@ -33,24 +33,46 @@
 #include "utilities/macros.hpp"
 
 #ifdef ASSERT
-void Mutex::check_safepoint_state(Thread* thread, bool do_safepoint_check) {
-  // If the JavaThread checks for safepoint, verify that the lock wasn't created with safepoint_check_never.
-  SafepointCheckRequired not_allowed = do_safepoint_check ?  Mutex::_safepoint_check_never :
-                                                             Mutex::_safepoint_check_always;
-  assert(!thread->is_active_Java_thread() || _safepoint_check_required != not_allowed,
-         "This lock should %s have a safepoint check for Java threads: %s",
-         _safepoint_check_required ? "always" : "never", name());
-
-  // If defined with safepoint_check_never, a NonJavaThread should never ask to safepoint check either.
-  assert(thread->is_Java_thread() || !do_safepoint_check || _safepoint_check_required != Mutex::_safepoint_check_never,
-         "NonJavaThread should not check for safepoint");
+void Mutex::check_block_state(Thread* thread) {
+  if (!_allow_vm_block && thread->is_VM_thread()) {
+    // JavaThreads are checked to make sure that they do not hold _allow_vm_block locks during operations
+    // that could safepoint.  Make sure the vm thread never uses locks with _allow_vm_block == false.
+    fatal("VM thread could block on lock that may be held by a JavaThread during safepoint: %s", name());
+  }
+
+  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
+         "locking not allowed when crash protection is set");
+}
+
+void Mutex::check_safepoint_state(Thread* thread) {
+  check_block_state(thread);
+
+  // If the JavaThread checks for safepoint, verify that the lock wasn't created with safepoint_check_never.
+  if (thread->is_active_Java_thread()) {
+    assert(_safepoint_check_required != _safepoint_check_never,
+           "This lock should %s have a safepoint check for Java threads: %s",
+           _safepoint_check_required ? "always" : "never", name());
+
+    // Also check NoSafepointVerifier, and thread state is _thread_in_vm
+    thread->check_for_valid_safepoint_state();
+  } else {
+    // If initialized with safepoint_check_never, a NonJavaThread should never ask to safepoint check either.
+    assert(_safepoint_check_required != _safepoint_check_never,
+           "NonJavaThread should not check for safepoint");
+  }
+}
+
+void Mutex::check_no_safepoint_state(Thread* thread) {
+  check_block_state(thread);
+  assert(!thread->is_active_Java_thread() || _safepoint_check_required != _safepoint_check_always,
+         "This lock should %s have a safepoint check for Java threads: %s",
+         _safepoint_check_required ? "always" : "never", name());
 }
 #endif // ASSERT
 
-void Mutex::lock(Thread * self) {
-  check_safepoint_state(self, true);
-  DEBUG_ONLY(check_prelock_state(self, true));
+void Mutex::lock(Thread* self) {
+  check_safepoint_state(self);
 
   assert(_owner != self, "invariant");
   Mutex* in_flight_mutex = NULL;
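An example of the kind of bug the strengthened check above now catches in debug builds; the lock name is hypothetical and the comments trace the assumed call path:

    {
      NoSafepointVerifier nsv;        // bumps the JavaThread's _no_safepoint_count
      // lock() -> check_safepoint_state() -> check_for_valid_safepoint_state()
      //        -> check_possible_safepoint() -> fatal("Possible safepoint reached ...")
      SomeSafepointChecked_lock->lock();
    }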
@@ -60,7 +82,6 @@ void Mutex::lock(Thread * self) {
       // The lock is contended
 
 #ifdef ASSERT
-      check_block_state(self);
       if (retry_cnt++ > 3) {
         log_trace(vmmutex)("JavaThread " INTPTR_FORMAT " on %d attempt trying to acquire vmmutex %s", p2i(self), retry_cnt, _name);
       }
@@ -98,7 +119,7 @@ void Mutex::lock() {
 // in the wrong way this can lead to a deadlock with the safepoint code.
 
 void Mutex::lock_without_safepoint_check(Thread * self) {
-  check_safepoint_state(self, false);
+  check_no_safepoint_state(self);
   assert(_owner != self, "invariant");
   _lock.lock();
   assert_owner(NULL);
@@ -114,8 +135,9 @@ void Mutex::lock_without_safepoint_check() {
 
 bool Mutex::try_lock() {
   Thread * const self = Thread::current();
-  DEBUG_ONLY(check_prelock_state(self, false);)
+  // Some safepoint_check_always locks use try_lock, so cannot check
+  // safepoint state, but can check blocking state.
+  check_block_state(self);
 
   if (_lock.try_lock()) {
     assert_owner(NULL);
     set_owner(self);
@@ -160,7 +182,6 @@ void Monitor::assert_wait_lock_state(Thread* self) {
 
 bool Monitor::wait_without_safepoint_check(long timeout) {
   Thread* const self = Thread::current();
-  check_safepoint_state(self, false);
 
   // timeout is in milliseconds - with zero meaning never timeout
   assert(timeout >= 0, "negative timeout");
@@ -171,6 +192,9 @@ bool Monitor::wait_without_safepoint_check(long timeout) {
   // conceptually set the owner to NULL in anticipation of
   // abdicating the lock in wait
   set_owner(NULL);
+
+  // Check safepoint state after resetting owner and possible NSV.
+  check_no_safepoint_state(self);
   int wait_status = _lock.wait(timeout);
   set_owner(self);
   return wait_status != 0;          // return true IFF timeout
@@ -178,7 +202,6 @@ bool Monitor::wait_without_safepoint_check(long timeout) {
 
 bool Monitor::wait(long timeout, bool as_suspend_equivalent) {
   Thread* const self = Thread::current();
-  check_safepoint_state(self, true);
 
   // timeout is in milliseconds - with zero meaning never timeout
   assert(timeout >= 0, "negative timeout");
@@ -193,6 +216,8 @@ bool Monitor::wait(long timeout, bool as_suspend_equivalent) {
   // conceptually set the owner to NULL in anticipation of
   // abdicating the lock in wait
   set_owner(NULL);
+  // Check safepoint state after resetting owner and possible NSV.
+  check_safepoint_state(self);
 
   JavaThread *jt = (JavaThread *)self;
   Mutex* in_flight_mutex = NULL;
@@ -255,7 +280,7 @@ Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
   _rank = Rank;
   _safepoint_check_required = safepoint_check_required;
 
-  assert(_safepoint_check_required != Mutex::_safepoint_check_sometimes || is_sometimes_ok(name),
+  assert(_safepoint_check_required != _safepoint_check_sometimes || is_sometimes_ok(name),
          "Lock has _safepoint_check_sometimes %s", name);
 #endif
 }
@@ -278,15 +303,27 @@ void Mutex::print_on_error(outputStream* st) const {
 // Non-product code
 
 #ifndef PRODUCT
+const char* print_safepoint_check(Mutex::SafepointCheckRequired safepoint_check) {
+  switch (safepoint_check) {
+  case Mutex::_safepoint_check_never:     return "safepoint_check_never";
+  case Mutex::_safepoint_check_sometimes: return "safepoint_check_sometimes";
+  case Mutex::_safepoint_check_always:    return "safepoint_check_always";
+  default: return "";
+  }
+}
+
 void Mutex::print_on(outputStream* st) const {
-  st->print_cr("Mutex: [" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
+  st->print("Mutex: [" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
             p2i(this), _name, p2i(_owner));
+  if (_allow_vm_block) {
+    st->print("%s", " allow_vm_block");
+  }
+  st->print(" %s", print_safepoint_check(_safepoint_check_required));
+  st->cr();
 }
 #endif
 
-#ifndef PRODUCT
 #ifdef ASSERT
 void Mutex::assert_owner(Thread * expected) {
   const char* msg = "invalid owner";
   if (expected == NULL) {
@@ -340,7 +377,6 @@ Mutex* Mutex::get_least_ranked_lock_besides_this(Mutex* locks) {
   return res;
 }
 
-
 bool Mutex::contains(Mutex* locks, Mutex* lock) {
   for (; locks != NULL; locks = locks->next()) {
     if (locks == lock) {
@@ -349,7 +385,27 @@ bool Mutex::contains(Mutex* locks, Mutex* lock) {
   }
   return false;
 }
-#endif
+
+// NSV implied with locking allow_vm_block or !safepoint_check locks.
+void Mutex::no_safepoint_verifier(Thread* thread, bool enable) {
+  // Threads_lock is special, since the safepoint synchronization will not start before this is
+  // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
+  // since it is used to transfer control between JavaThreads and the VMThread
+  // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
+  if ((_allow_vm_block &&
+       this != Threads_lock &&
+       this != Compile_lock &&             // Temporary: should not be necessary when we get separate compilation
+       this != tty_lock &&                 // The tty_lock is released for the safepoint.
+       this != VMOperationRequest_lock &&
+       this != VMOperationQueue_lock) ||
+       rank() == Mutex::special) {
+    if (enable) {
+      thread->_no_safepoint_count++;
+    } else {
+      thread->_no_safepoint_count--;
+    }
+  }
+}
 
 // Called immediately after lock acquisition or release as a diagnostic
 // to track the lock-set of the thread and test for rank violations that
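With the implied NoSafepointVerifier above, holding one of these allow_vm_block locks across code that could safepoint now fails fast instead of deadlocking at the next safepoint. Illustrative only; the lock name is hypothetical and assumed to be a safepoint_check_always, allow_vm_block lock:

    {
      MutexLocker ml(HypotheticalAllowVmBlock_lock); // set_owner() -> no_safepoint_verifier(self, true)
      // Anything here that reaches Thread::check_possible_safepoint(), e.g. requesting a
      // blocking VM operation, hits fatal("Possible safepoint reached by thread that does
      // not allow it") and prints the owned locks, instead of blocking the VM thread later.
    } // unlock -> set_owner(NULL) -> no_safepoint_verifier(self, false)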
@@ -376,7 +432,6 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
 
     // link "this" into the owned locks list
 
-#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
     Mutex* locks = get_least_ranked_lock(new_owner->owned_locks());
     // Mutex::set_owner_implementation is a friend of Thread
 
@@ -401,20 +456,21 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
     this->_next = new_owner->_owned_locks;
     new_owner->_owned_locks = this;
-#endif
+
+    // NSV implied with locking allow_vm_block flag.
+    no_safepoint_verifier(new_owner, true);
 
   } else {
     // the thread is releasing this lock
 
     Thread* old_owner = _owner;
-    DEBUG_ONLY(_last_owner = old_owner;)
+    _last_owner = old_owner;
 
     assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
     assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");
 
     _owner = NULL; // set the owner
 
-#ifdef ASSERT
     Mutex* locks = old_owner->owned_locks();
 
     // remove "this" from the owned locks list
@@ -434,33 +490,9 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
       prev->_next = _next;
     }
     _next = NULL;
-#endif
+
+    // ~NSV implied with locking allow_vm_block flag.
+    no_safepoint_verifier(old_owner, false);
   }
 }
-
-// Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock()
-void Mutex::check_prelock_state(Thread *thread, bool safepoint_check) {
-  if (safepoint_check) {
-    assert((!thread->is_active_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
-           || rank() == Mutex::special, "wrong thread state for using locks");
-    if (thread->is_VM_thread() && !allow_vm_block()) {
-      fatal("VM thread using lock %s (not allowed to block on)", name());
-    }
-    DEBUG_ONLY(if (rank() != Mutex::special) \
-               thread->check_for_valid_safepoint_state(false);)
-  }
-  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
-         "locking not allowed when crash protection is set");
-}
-
-void Mutex::check_block_state(Thread *thread) {
-  if (!_allow_vm_block && thread->is_VM_thread()) {
-    warning("VM thread blocked on lock");
-    print();
-    BREAKPOINT;
-  }
-  assert(_owner != thread, "deadlock: blocking on mutex owned by current thread");
-}
-
-#endif // PRODUCT
+#endif // ASSERT


@@ -81,21 +81,22 @@ class Mutex : public CHeapObj<mtSynchronizer> {
   char _name[MUTEX_NAME_LEN];            // Name of mutex/monitor
 
   // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
-#ifndef PRODUCT
+#ifdef ASSERT
   bool    _allow_vm_block;
-  DEBUG_ONLY(int _rank;)                 // rank (to avoid/detect potential deadlocks)
-  DEBUG_ONLY(Mutex* _next;)              // Used by a Thread to link up owned locks
-  DEBUG_ONLY(Thread* _last_owner;)       // the last thread to own the lock
-  DEBUG_ONLY(static bool contains(Mutex* locks, Mutex* lock);)
-  DEBUG_ONLY(static Mutex* get_least_ranked_lock(Mutex* locks);)
-  DEBUG_ONLY(Mutex* get_least_ranked_lock_besides_this(Mutex* locks);)
-#endif
+  int     _rank;                         // rank (to avoid/detect potential deadlocks)
+  Mutex*  _next;                         // Used by a Thread to link up owned locks
+  Thread* _last_owner;                   // the last thread to own the lock
+  static bool contains(Mutex* locks, Mutex* lock);
+  static Mutex* get_least_ranked_lock(Mutex* locks);
+  Mutex* get_least_ranked_lock_besides_this(Mutex* locks);
+#endif // ASSERT
 
-  void set_owner_implementation(Thread* owner)                        PRODUCT_RETURN;
-  void check_prelock_state     (Thread* thread, bool safepoint_check) PRODUCT_RETURN;
-  void check_block_state       (Thread* thread)                       PRODUCT_RETURN;
-  void check_safepoint_state   (Thread* thread, bool safepoint_check) NOT_DEBUG_RETURN;
+  void set_owner_implementation(Thread* owner)                        NOT_DEBUG({ _owner = owner;});
+  void check_block_state       (Thread* thread)                       NOT_DEBUG_RETURN;
+  void check_safepoint_state   (Thread* thread)                       NOT_DEBUG_RETURN;
+  void check_no_safepoint_state(Thread* thread)                       NOT_DEBUG_RETURN;
   void assert_owner            (Thread* expected)                     NOT_DEBUG_RETURN;
+  void no_safepoint_verifier   (Thread* thread, bool enable)          NOT_DEBUG_RETURN;
 
  public:
   enum {
@@ -171,21 +172,16 @@ class Mutex : public CHeapObj<mtSynchronizer> {
 #ifndef PRODUCT
   void print_on(outputStream* st) const;
   void print() const                      { print_on(::tty); }
-  DEBUG_ONLY(int    rank() const          { return _rank; })
+#endif
+#ifdef ASSERT
+  int    rank() const          { return _rank; }
   bool   allow_vm_block()      { return _allow_vm_block; }
-  DEBUG_ONLY(Mutex *next()  const         { return _next; })
-  DEBUG_ONLY(void   set_next(Mutex *next) { _next = next; })
-#endif
+  Mutex *next()  const         { return _next; }
+  void   set_next(Mutex *next) { _next = next; }
+#endif // ASSERT
 
-  void set_owner(Thread* owner) {
-#ifndef PRODUCT
-    set_owner_implementation(owner);
-    DEBUG_ONLY(void verify_mutex(Thread* thr);)
-#else
-    _owner = owner;
-#endif
-  }
+  void set_owner(Thread* owner) { set_owner_implementation(owner); }
 };
 
 class Monitor : public Mutex {

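For context on the NOT_DEBUG forms used above, a condensed sketch of the relevant macros (they live in utilities/macros.hpp; paraphrased here):

    #ifdef ASSERT
    #define NOT_DEBUG(code)
    #define NOT_DEBUG_RETURN        /* next token must be ; */
    #else
    #define NOT_DEBUG(code) code
    #define NOT_DEBUG_RETURN {}
    #endif

So set_owner_implementation() is the full lock-tracking routine from mutex.cpp in debug builds and collapses to a plain store of _owner in product builds, while the check_* declarations get empty inline bodies in product builds.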

@@ -230,7 +230,7 @@ void mutex_init() {
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
 
   def(MetaspaceExpand_lock         , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
-  def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);
+  def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     false, Monitor::_safepoint_check_always);
 
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
   def(CompiledMethod_lock          , PaddedMutex  , special-1,   true,  Monitor::_safepoint_check_never);
@@ -240,7 +240,7 @@ void mutex_init() {
   def(SystemDictionary_lock        , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_always);
   def(ProtectionDomainSet_lock     , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
   def(SharedDictionary_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);
-  def(Module_lock                  , PaddedMutex  , leaf+2,      true,  Monitor::_safepoint_check_always);
+  def(Module_lock                  , PaddedMutex  , leaf+2,      false, Monitor::_safepoint_check_always);
   def(InlineCacheBuffer_lock       , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(VMStatistic_lock             , PaddedMutex  , leaf,        false, Monitor::_safepoint_check_always);
   def(ExpandHeap_lock              , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // Used during compilation by VM thread
@@ -291,7 +291,7 @@ void mutex_init() {
   def(MethodData_lock              , PaddedMutex  , nonleaf+3,   false, Monitor::_safepoint_check_always);
   def(TouchedMethodLog_lock        , PaddedMutex  , nonleaf+3,   false, Monitor::_safepoint_check_always);
 
-  def(MethodCompileQueue_lock      , PaddedMonitor, nonleaf+4,   true,  Monitor::_safepoint_check_always);
+  def(MethodCompileQueue_lock      , PaddedMonitor, nonleaf+4,   false, Monitor::_safepoint_check_always);
   def(Debug2_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
   def(Debug3_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
   def(CompileThread_lock           , PaddedMonitor, nonleaf+5,   false, Monitor::_safepoint_check_always);
@@ -316,7 +316,7 @@ void mutex_init() {
   def(CodeHeapStateAnalytics_lock  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   def(NMethodSweeperStats_lock     , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
-  def(ThreadsSMRDelete_lock        , PaddedMonitor, special,     false, Monitor::_safepoint_check_never);
+  def(ThreadsSMRDelete_lock        , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);
   def(SharedDecoder_lock           , PaddedMutex  , native,      false, Monitor::_safepoint_check_never);
   def(DCmdFactory_lock             , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 #if INCLUDE_NMT
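For reference, the def() columns in mutex_init() are name, padded lock type, rank, allow_vm_block, and safepoint check mode, so the four edits above only flip the allow_vm_block flag. A hypothetical new lock that the VM thread may block on would therefore be declared along these lines (Example_lock is made up for this illustration):

    def(Example_lock, PaddedMutex, leaf, true /* allow_vm_block */, Monitor::_safepoint_check_never);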


@@ -149,6 +149,8 @@ extern Mutex* CodeHeapStateAnalytics_lock;       // lock print functions against
 extern Monitor* JVMCI_lock;                      // Monitor to control initialization of JVMCI
 #endif
 
+extern Mutex* tty_lock;                          // lock to synchronize output.
+
 // A MutexLocker provides mutual exclusion with respect to a given mutex
 // for the scope which contains the locker.  The lock is an OS lock, not
 // an object lock, and the two do not interoperate.  Do not use Mutex-based


@@ -419,7 +419,7 @@ void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
   _dolock = do_lock;
   _thread = thread;
-  _thread->check_for_valid_safepoint_state(false);
+  _thread->check_for_valid_safepoint_state();
   _obj = obj;
 
   if (_dolock) {


@@ -973,6 +973,7 @@ void Thread::check_possible_safepoint() {
   if (!is_Java_thread()) return;
 
   if (_no_safepoint_count > 0) {
+    print_owned_locks();
     fatal("Possible safepoint reached by thread that does not allow it");
   }
 #ifdef CHECK_UNHANDLED_OOPS
@@ -981,37 +982,18 @@ void Thread::check_possible_safepoint() {
 #endif // CHECK_UNHANDLED_OOPS
 }
 
-// The flag: potential_vm_operation notifies if this particular safepoint state could potentially
-// invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
-// no locks which allow_vm_block's are held
-void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
+void Thread::check_for_valid_safepoint_state() {
   if (!is_Java_thread()) return;
 
+  // Check NoSafepointVerifier, which is implied by locks taken that can be
+  // shared with the VM thread.  This makes sure that no locks with allow_vm_block
+  // are held.
   check_possible_safepoint();
 
   if (((JavaThread*)this)->thread_state() != _thread_in_vm) {
     fatal("LEAF method calling lock?");
   }
 
-  if (potential_vm_operation && !Universe::is_bootstrapping()) {
-    // Make sure we do not hold any locks that the VM thread also uses.
-    // This could potentially lead to deadlocks
-    for (Mutex* cur = _owned_locks; cur; cur = cur->next()) {
-      // Threads_lock is special, since the safepoint synchronization will not start before this is
-      // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
-      // since it is used to transfer control between JavaThreads and the VMThread
-      // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
-      if ((cur->allow_vm_block() &&
-           cur != Threads_lock &&
-           cur != Compile_lock &&              // Temporary: should not be necessary when we get separate compilation
-           cur != VMOperationRequest_lock &&
-           cur != VMOperationQueue_lock) ||
-           cur->rank() == Mutex::special) {
-        fatal("Thread holding lock at safepoint that vm can block on: %s", cur->name());
-      }
-    }
-  }
-
   if (GCALotAtAllSafepoints) {
     // We could enter a safepoint here and thus have a gc
     InterfaceSupport::check_gc_alot();


@@ -753,7 +753,7 @@ protected:
 
   // These functions check conditions on a JavaThread before possibly going to a safepoint,
   // including NoSafepointVerifier.
-  void check_for_valid_safepoint_state(bool potential_vm_operation) NOT_DEBUG_RETURN;
+  void check_for_valid_safepoint_state() NOT_DEBUG_RETURN;
   void check_possible_safepoint() NOT_DEBUG_RETURN;
 
  private:


@@ -669,7 +669,7 @@ void VMThread::execute(VM_Operation* op) {
     bool concurrent = op->evaluate_concurrently();
     // only blocking VM operations need to verify the caller's safepoint state:
     if (!concurrent) {
-      t->check_for_valid_safepoint_state(true);
+      t->check_for_valid_safepoint_state();
     }
 
     // New request from Java thread, evaluate prologue


@@ -999,8 +999,8 @@ inline ConcurrentHashTable<CONFIG, F>::
 {
   _stats_rate = TableRateStatistics();
   _resize_lock =
-    new Mutex(Mutex::leaf, "ConcurrentHashTable", false,
-              Monitor::_safepoint_check_never);
+    new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
+              Mutex::_safepoint_check_never);
   _table = new InternalTable(log2size);
   assert(log2size_limit >= log2size, "bad ergo");
   _size_limit_reached = _table->_log2_size == _log2_size_limit;


@@ -100,7 +100,7 @@ template <class T> class EventLogBase : public EventLog {
  public:
   EventLogBase<T>(const char* name, const char* handle, int length = LogEventsBufferEntries):
-    _mutex(Mutex::event, name, false, Monitor::_safepoint_check_never),
+    _mutex(Mutex::event, name, true, Mutex::_safepoint_check_never),
     _name(name),
     _handle(handle),
     _length(length),
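Both this call site and the ConcurrentHashTable one above flip the third constructor argument; per the Mutex constructor shown earlier (rank, name, allow_vm_block, safepoint check mode), that marks these locks as ones the VM thread may block on, presumably so the new check_block_state() does not turn existing VM-thread uses into fatal errors. The other edit, Monitor::_safepoint_check_never to Mutex::_safepoint_check_never, is only a qualification cleanup, since Monitor derives from Mutex (see mutex.hpp above). Illustrative shape of such a construction, with a made-up lock name:

    Mutex* lock = new Mutex(Mutex::leaf, "SomeInternalTable_lock",
                            true /* allow_vm_block: the VM thread may block here */,
                            Mutex::_safepoint_check_never);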