8313202: MutexLocker should disallow null Mutexes

Reviewed-by: dholmes, coleenp, dcubed
Aleksey Shipilev 2023-09-13 07:32:54 +00:00
parent 36552e7193
commit 2d168c5734
18 changed files with 97 additions and 64 deletions

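In essence, the patch splits the old null-tolerant MutexLocker into an internal MutexLockerImpl plus two public lockers: MutexLocker, which now asserts that its Mutex is non-null, and ConditionalMutexLocker, which makes the old "pass nullptr to skip locking" idiom explicit at the call site. The sketch below is a minimal, self-contained model of that hierarchy, not HotSpot's actual code: the real classes in src/hotspot/share/runtime/mutexLocker.hpp also carry safepoint-check flags, lock ranks, and Thread* overloads, and std::mutex here is only a stand-in for HotSpot's platform mutex. The virtual lock()/unlock() is a modeling convenience used by a later sketch.

#include <cassert>
#include <mutex>

// Simplified stand-in for HotSpot's Mutex; std::mutex models the platform lock.
class Mutex {
  std::mutex _impl;
 public:
  virtual ~Mutex() = default;
  virtual void lock()   { _impl.lock(); }
  virtual void unlock() { _impl.unlock(); }
};

// Internal implementation. Skips on null Mutex, like the patched base class.
class MutexLockerImpl {
 protected:
  Mutex* _mutex;
  explicit MutexLockerImpl(Mutex* mutex) : _mutex(mutex) {
    if (_mutex != nullptr) { _mutex->lock(); }
  }
  ~MutexLockerImpl() {
    if (_mutex != nullptr) { _mutex->unlock(); }
  }
};

// Simplest mutex locker: a null mutex is now a programming error.
class MutexLocker : public MutexLockerImpl {
 public:
  explicit MutexLocker(Mutex* mutex) : MutexLockerImpl(mutex) {
    assert(mutex != nullptr && "null mutex not allowed");
  }
};

// Conditional locker: locks only when the condition holds, and insists
// the mutex is real whenever it would actually lock.
class ConditionalMutexLocker : public MutexLockerImpl {
 public:
  ConditionalMutexLocker(Mutex* mutex, bool condition)
    : MutexLockerImpl(condition ? mutex : nullptr) {
    assert((!condition || mutex != nullptr) && "null mutex not allowed when locking");
  }
};

Mutex Example_lock;

int main() {
  bool should_lock = true;
  // Old idiom, now rejected by MutexLocker's assert:
  //   MutexLocker ml(should_lock ? &Example_lock : nullptr);
  // New idiom, with the condition stated explicitly:
  ConditionalMutexLocker ml(&Example_lock, should_lock);
  return 0;
}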
src/hotspot/share/classfile/classLoader.cpp

@@ -938,7 +938,7 @@ void ClassLoader::load_java_library() {
}
void ClassLoader::release_load_zip_library() {
-MutexLocker locker(Zip_lock, Monitor::_no_safepoint_check_flag);
+ConditionalMutexLocker locker(Zip_lock, Zip_lock != nullptr, Monitor::_no_safepoint_check_flag);
if (_libzip_loaded == 0) {
load_zip_library();
Atomic::release_store(&_libzip_loaded, 1);

src/hotspot/share/classfile/systemDictionary.cpp

@@ -1545,10 +1545,10 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
unloading_occurred = ClassLoaderDataGraph::do_unloading();
if (unloading_occurred) {
-MutexLocker ml2(is_concurrent ? Module_lock : nullptr);
+ConditionalMutexLocker ml2(Module_lock, is_concurrent);
JFR_ONLY(Jfr::on_unloading_classes();)
MANAGEMENT_ONLY(FinalizerService::purge_unloaded();)
-MutexLocker ml1(is_concurrent ? SystemDictionary_lock : nullptr);
+ConditionalMutexLocker ml1(SystemDictionary_lock, is_concurrent);
ClassLoaderDataGraph::clean_module_and_package_info();
LoaderConstraintTable::purge_loader_constraints();
ResolutionErrorTable::purge_resolution_errors();
@@ -1571,7 +1571,7 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
assert(ProtectionDomainCacheTable::number_of_entries() == 0, "should be empty");
}
-MutexLocker ml(is_concurrent ? ClassInitError_lock : nullptr);
+ConditionalMutexLocker ml(ClassInitError_lock, is_concurrent);
InstanceKlass::clean_initialization_error_table();
}

src/hotspot/share/code/compiledMethod.cpp

@@ -116,7 +116,7 @@ const char* CompiledMethod::state() const {
//-----------------------------------------------------------------------------
void CompiledMethod::set_deoptimized_done() {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (_deoptimization_status != deoptimize_done) { // can't go backwards
Atomic::store(&_deoptimization_status, deoptimize_done);
}

src/hotspot/share/code/nmethod.cpp

@@ -1339,7 +1339,7 @@ bool nmethod::make_not_entrant() {
{
// Enter critical section. Does not block for safepoint.
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
if (Atomic::load(&_state) == not_entrant) {
// another thread already performed this transition so nothing

src/hotspot/share/code/stubs.cpp

@@ -238,7 +238,7 @@ void StubQueue::verify() {
void StubQueue::print() {
-MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker lock(_mutex, _mutex != nullptr, Mutex::_no_safepoint_check_flag);
for (Stub* s = first(); s != nullptr; s = next(s)) {
stub_print(s);
}

src/hotspot/share/compiler/compileBroker.cpp

@@ -2815,29 +2815,33 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, size
!Compile_lock->owned_by_self();
bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
!CodeCache_lock->owned_by_self();
-Mutex* global_lock_1 = allFun ? (should_take_Compile_lock ? Compile_lock : nullptr) : nullptr;
-Monitor* global_lock_2 = allFun ? (should_take_CodeCache_lock ? CodeCache_lock : nullptr) : nullptr;
-Mutex* function_lock_1 = allFun ? nullptr : (should_take_Compile_lock ? Compile_lock : nullptr);
-Monitor* function_lock_2 = allFun ? nullptr : (should_take_CodeCache_lock ? CodeCache_lock : nullptr);
+bool take_global_lock_1 = allFun && should_take_Compile_lock;
+bool take_global_lock_2 = allFun && should_take_CodeCache_lock;
+bool take_function_lock_1 = !allFun && should_take_Compile_lock;
+bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
+bool take_global_locks = take_global_lock_1 || take_global_lock_2;
+bool take_function_locks = take_function_lock_1 || take_function_lock_2;
ts_global.update(); // record starting point
-MutexLocker mu1(global_lock_1, Mutex::_safepoint_check_flag);
-MutexLocker mu2(global_lock_2, Mutex::_no_safepoint_check_flag);
-if ((global_lock_1 != nullptr) || (global_lock_2 != nullptr)) {
+ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
+ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
+if (take_global_locks) {
out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
ts_global.update(); // record starting point
}
if (aggregate) {
ts.update(); // record starting point
-MutexLocker mu11(function_lock_1, Mutex::_safepoint_check_flag);
-MutexLocker mu22(function_lock_2, Mutex::_no_safepoint_check_flag);
-if ((function_lock_1 != nullptr) || (function_lock_2 != nullptr)) {
+ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
+ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
+if (take_function_locks) {
out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
}
ts.update(); // record starting point
CodeCache::aggregate(out, granularity);
-if ((function_lock_1 != nullptr) || (function_lock_2 != nullptr)) {
+if (take_function_locks) {
out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
}
}
@@ -2858,7 +2862,7 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, size
}
if (discard) CodeCache::discard(out);
-if ((global_lock_1 != nullptr) || (global_lock_2 != nullptr)) {
+if (take_global_locks) {
out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
}
out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());

src/hotspot/share/gc/g1/heapRegionRemSet.cpp

@@ -134,7 +134,7 @@ void HeapRegionRemSet::remove_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
assert_locked_or_safepoint(CodeCache_lock);
-MutexLocker ml(CodeCache_lock->owned_by_self() ? nullptr : &_m, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(&_m, !CodeCache_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
_code_roots.remove(nm);
// Check that there were no duplicates

src/hotspot/share/oops/instanceKlass.cpp

@@ -2472,7 +2472,7 @@ void InstanceKlass::clean_method_data() {
for (int m = 0; m < methods()->length(); m++) {
MethodData* mdo = methods()->at(m)->method_data();
if (mdo != nullptr) {
-MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? nullptr : mdo->extra_data_lock());
+ConditionalMutexLocker ml(mdo->extra_data_lock(), !SafepointSynchronize::is_at_safepoint());
mdo->clean_method_data(/*always_clean*/false);
}
}
@@ -3406,8 +3406,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
// Remove osr nmethod from the list. Return true if found and removed.
bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock
-, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = nullptr;
nmethod* cur = osr_nmethods_head();
@@ -3448,8 +3447,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
}
int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m) {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
int found = 0;
while (osr != nullptr) {
@@ -3464,8 +3462,7 @@ int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Met
}
nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
nmethod* osr = osr_nmethods_head();
nmethod* best = nullptr;
while (osr != nullptr) {

src/hotspot/share/oops/method.cpp

@@ -1148,7 +1148,7 @@ void Method::clear_code() {
}
void Method::unlink_code(CompiledMethod *compare) {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// We need to check if either the _code or _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in Method* as seen in bugid 4947125.
@@ -1159,7 +1159,7 @@ void Method::unlink_code(CompiledMethod *compare) {
}
void Method::unlink_code() {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
clear_code();
}

src/hotspot/share/prims/jvmtiManageCapabilities.cpp

@@ -217,12 +217,7 @@ void JvmtiManageCapabilities::copy_capabilities(const jvmtiCapabilities *from, j
}
}
-Mutex* JvmtiManageCapabilities::lock() {
-if (Thread::current_or_null() == nullptr) {
-return nullptr; // Detached thread, can be a call from Agent_OnLoad.
-}
-return _capabilities_lock;
-}
void JvmtiManageCapabilities::get_potential_capabilities_nolock(const jvmtiCapabilities *current,
const jvmtiCapabilities *prohibited,
@@ -246,7 +241,7 @@ void JvmtiManageCapabilities::get_potential_capabilities_nolock(const jvmtiCapab
void JvmtiManageCapabilities::get_potential_capabilities(const jvmtiCapabilities* current,
const jvmtiCapabilities* prohibited,
jvmtiCapabilities* result) {
-MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+CapabilitiesMutexLocker ml;
get_potential_capabilities_nolock(current, prohibited, result);
}
@@ -254,7 +249,7 @@ jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *cu
const jvmtiCapabilities *prohibited,
const jvmtiCapabilities *desired,
jvmtiCapabilities *result) {
-MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+CapabilitiesMutexLocker ml;
// check that the capabilities being added are potential capabilities
jvmtiCapabilities temp;
@@ -296,7 +291,7 @@ jvmtiError JvmtiManageCapabilities::add_capabilities(const jvmtiCapabilities *cu
void JvmtiManageCapabilities::relinquish_capabilities(const jvmtiCapabilities *current,
const jvmtiCapabilities *unwanted,
jvmtiCapabilities *result) {
-MutexLocker ml(lock(), Mutex::_no_safepoint_check_flag);
+CapabilitiesMutexLocker ml;
jvmtiCapabilities to_trash;
jvmtiCapabilities temp;

src/hotspot/share/prims/jvmtiManageCapabilities.hpp

@@ -54,6 +54,15 @@ private:
// lock to access the class data
static Mutex* _capabilities_lock;
+// lock, unless called from a detached thread (e.g. can be a call from Agent_OnLoad)
+class CapabilitiesMutexLocker: public ConditionalMutexLocker {
+public:
+CapabilitiesMutexLocker() :
+ConditionalMutexLocker(_capabilities_lock,
+Thread::current_or_null() != nullptr,
+Mutex::_no_safepoint_check_flag) {}
+};
// basic internal operations
static jvmtiCapabilities *either(const jvmtiCapabilities *a, const jvmtiCapabilities *b, jvmtiCapabilities *result);
static jvmtiCapabilities *both(const jvmtiCapabilities *a, const jvmtiCapabilities *b, jvmtiCapabilities *result);
@@ -67,9 +76,6 @@ private:
static jvmtiCapabilities init_always_solo_capabilities();
static jvmtiCapabilities init_onload_solo_capabilities();
-// returns nullptr in onload phase
-static Mutex* lock();
// get_potential_capabilities without lock
static void get_potential_capabilities_nolock(const jvmtiCapabilities* current,
const jvmtiCapabilities* prohibited,

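The removed lock() accessor is replaced by the CapabilitiesMutexLocker above: instead of returning a possibly-null Mutex*, the condition (is the caller an attached thread?) is baked into a small locker subclass. The same pattern on the simplified model from the first sketch; current_thread_is_attached() and capabilities_lock are hypothetical stand-ins for Thread::current_or_null() != nullptr and _capabilities_lock.

// Models the CapabilitiesMutexLocker pattern on the simplified classes above.
bool current_thread_is_attached();   // hypothetical stand-in
extern Mutex capabilities_lock;      // hypothetical stand-in

class CapabilitiesLockerModel : public ConditionalMutexLocker {
 public:
  CapabilitiesLockerModel()
    : ConditionalMutexLocker(&capabilities_lock, current_thread_is_attached()) {}
};

void get_potential_capabilities_model() {
  CapabilitiesLockerModel ml; // locks unless called from a detached thread
  // ... read or update capability masks under the lock ...
}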
src/hotspot/share/runtime/deoptimization.cpp

@@ -117,8 +117,7 @@ DeoptimizationScope::~DeoptimizationScope() {
}
void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// If it's already marked but we still need it to be deopted.
if (cm->is_marked_for_deoptimization()) {
@@ -139,8 +138,8 @@ void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
}
void DeoptimizationScope::dependent(CompiledMethod* cm) {
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// A method marked by someone else may have a _required_gen lower than what we marked with.
// Therefore only store it if it's higher than _required_gen.
if (_required_gen < cm->_deoptimization_generation) {
@@ -170,8 +169,8 @@ void DeoptimizationScope::deoptimize_marked() {
bool wait = false;
while (true) {
{
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// First we check if we or someone else already deopted the gen we want.
if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
DEBUG_ONLY(_deopted = true;)
@@ -198,8 +197,8 @@ void DeoptimizationScope::deoptimize_marked() {
Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
DEBUG_ONLY(_deopted = true;)
{
-MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(CompiledMethod_lock, !CompiledMethod_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
// Make sure that committed doesn't go backwards.
// Should only happen if we did a deopt during a safepoint above.
if (DeoptimizationScope::_committed_deopt_gen < comitting) {

src/hotspot/share/runtime/handshake.cpp

@@ -512,7 +512,7 @@ bool HandshakeState::has_operation(bool allow_suspend, bool check_async_exceptio
bool HandshakeState::has_async_exception_operation() {
if (!has_operation()) return false;
-MutexLocker ml(_lock.owned_by_self() ? nullptr : &_lock, Mutex::_no_safepoint_check_flag);
+ConditionalMutexLocker ml(&_lock, !_lock.owned_by_self(), Mutex::_no_safepoint_check_flag);
return _queue.peek(async_exception_filter) != nullptr;
}

src/hotspot/share/runtime/mutexLocker.cpp

@@ -372,7 +372,7 @@ void mutex_init() {
#undef MUTEX_STORAGE
#undef MUTEX_STORAGE_NAME
-void MutexLocker::post_initialize() {
+void MutexLockerImpl::post_initialize() {
// Print mutex ranks if requested.
LogTarget(Info, vmmutex) lt;
if (lt.is_enabled()) {

src/hotspot/share/runtime/mutexLocker.hpp

@@ -187,11 +187,13 @@ void assert_lock_strong(const Mutex* lock);
#define assert_lock_strong(lock)
#endif
-class MutexLocker: public StackObj {
+// Internal implementation. Skips on null Mutex.
+// Subclasses enforce stronger invariants.
+class MutexLockerImpl: public StackObj {
protected:
Mutex* _mutex;
public:
-MutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
_mutex(mutex) {
bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
if (_mutex != nullptr) {
@@ -203,7 +205,7 @@ class MutexLocker: public StackObj {
}
}
-MutexLocker(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
_mutex(mutex) {
bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
if (_mutex != nullptr) {
@@ -215,21 +217,51 @@ class MutexLocker: public StackObj {
}
}
-~MutexLocker() {
+~MutexLockerImpl() {
if (_mutex != nullptr) {
assert_lock_strong(_mutex);
_mutex->unlock();
}
}
public:
static void post_initialize();
};
+// Simplest mutex locker.
+// Does not allow null mutexes.
+class MutexLocker: public MutexLockerImpl {
+public:
+MutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(mutex, flag) {
+assert(mutex != nullptr, "null mutex not allowed");
+}
+MutexLocker(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(thread, mutex, flag) {
+assert(mutex != nullptr, "null mutex not allowed");
+}
+};
+// Conditional mutex locker.
+// Like MutexLocker above, but only locks when condition is true.
+class ConditionalMutexLocker: public MutexLockerImpl {
+public:
+ConditionalMutexLocker(Mutex* mutex, bool condition, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(condition ? mutex : nullptr, flag) {
+assert(!condition || mutex != nullptr, "null mutex not allowed when locking");
+}
+ConditionalMutexLocker(Thread* thread, Mutex* mutex, bool condition, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
+MutexLockerImpl(thread, condition ? mutex : nullptr, flag) {
+assert(!condition || mutex != nullptr, "null mutex not allowed when locking");
+}
+};
// A MonitorLocker is like a MutexLocker above, except it allows
// wait/notify as well which are delegated to the underlying Monitor.
// It also disallows null.
-class MonitorLocker: public MutexLocker {
+class MonitorLocker: public MutexLockerImpl {
Mutex::SafepointCheckFlag _flag;
protected:
@@ -239,13 +271,13 @@ class MonitorLocker: public MutexLocker {
public:
MonitorLocker(Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
-MutexLocker(monitor, flag), _flag(flag) {
+MutexLockerImpl(monitor, flag), _flag(flag) {
// Superclass constructor did locking
assert(monitor != nullptr, "null monitor not allowed");
}
MonitorLocker(Thread* thread, Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
-MutexLocker(thread, monitor, flag), _flag(flag) {
+MutexLockerImpl(thread, monitor, flag), _flag(flag) {
// Superclass constructor did locking
assert(monitor != nullptr, "null monitor not allowed");
}

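With this hierarchy, call sites pick the weakest contract they can justify, while MonitorLocker keeps its existing non-null contract and simply rebases onto MutexLockerImpl. An illustrative use of the simplified model from the first sketch (not HotSpot code):

// Illustrative call sites against the simplified model from the first sketch.
void example(Mutex* lock, bool cond) {
  {
    MutexLocker ml(lock);                  // lock must be non-null, always locks
  }
  {
    ConditionalMutexLocker ml(lock, cond); // locks only when cond is true
  }
  // MutexLocker bad(nullptr);             // now trips the assert -- the point of the change
}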
src/hotspot/share/runtime/task.cpp

@@ -87,7 +87,7 @@ void PeriodicTask::enroll() {
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
-MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock);
+ConditionalMutexLocker ml(PeriodicTask_lock, !PeriodicTask_lock->owned_by_self());
if (_num_tasks == PeriodicTask::max_tasks) {
fatal("Overflow in PeriodicTask table");
@@ -108,7 +108,7 @@ void PeriodicTask::disenroll() {
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
-MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock);
+ConditionalMutexLocker ml(PeriodicTask_lock, !PeriodicTask_lock->owned_by_self());
int index;
for(index = 0; index < _num_tasks && _tasks[index] != this; index++)

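The comment in PeriodicTask::enroll() explains the condition used at most call sites in this patch: VM-internal Mutexes are not recursive, so a path that can be reached with the lock already held must skip re-acquisition. A sketch of that guard on the simplified model, where owned_by_self() is an assumed helper (HotSpot's Mutex tracks its owner Thread* natively):

#include <thread>

// Extends the simplified Mutex with an owner check; owned_by_self() is an
// assumed helper here, not HotSpot's implementation.
class OwnedMutex : public Mutex {
  std::thread::id _owner;
 public:
  bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
  void lock() override   { Mutex::lock(); _owner = std::this_thread::get_id(); }
  void unlock() override { _owner = std::thread::id(); Mutex::unlock(); }
};

OwnedMutex PeriodicTask_lock_model;

void enroll_model() {
  // Lock only if this thread does not already hold the lock; VM-internal
  // mutexes do not support recursion.
  ConditionalMutexLocker ml(&PeriodicTask_lock_model,
                            !PeriodicTask_lock_model.owned_by_self());
  // ... mutate the shared task table here ...
}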
src/hotspot/share/runtime/threadSMR.inline.hpp

@@ -140,7 +140,7 @@ inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
}
inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {
-MutexLocker ml(Threads_lock->owned_by_self() ? nullptr : Threads_lock);
+ConditionalMutexLocker ml(Threads_lock, !Threads_lock->owned_by_self());
return is_a_protected_JavaThread(thread);
}

src/hotspot/share/runtime/threads.cpp

@@ -653,7 +653,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
LogConfiguration::post_initialize();
Metaspace::post_initialize();
-MutexLocker::post_initialize();
+MutexLockerImpl::post_initialize();
HOTSPOT_VM_INIT_END();