8333005: Deadlock when setting or updating the inline cache

Reviewed-by: sjohanss, shade
This commit is contained in:
Erik Österlund 2024-05-31 12:49:22 +00:00
parent 7ab74c5f26
commit d481215126
15 changed files with 84 additions and 40 deletions

@ -148,18 +148,21 @@ public:
return;
}
ShenandoahReentrantLocker locker(nm_data->lock());
{
ShenandoahReentrantLocker locker(nm_data->lock());
// Heal oops and disarm
if (_bs->is_armed(nm)) {
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahNMethod::heal_nmethod_metadata(nm_data);
// Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
// mark_as_maybe_on_stack() callbacks when they are used again.
_bs->set_guard_value(nm, 0);
// Heal oops and disarm
if (_bs->is_armed(nm)) {
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahNMethod::heal_nmethod_metadata(nm_data);
// Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
// mark_as_maybe_on_stack() callbacks when they are used again.
_bs->set_guard_value(nm, 0);
}
}
// Clear compiled ICs and exception caches
ShenandoahReentrantLocker locker(nm_data->ic_lock());
nm->unload_nmethod_caches(_unloading_occurred);
}
};

@ -32,7 +32,7 @@
#include "runtime/continuation.hpp"
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
_nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false) {
_nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false), _lock(), _ic_lock() {
if (!oops.is_empty()) {
_oops_count = oops.length();

@ -44,6 +44,7 @@ private:
bool _has_non_immed_oops;
bool _unregistered;
ShenandoahReentrantLock _lock;
ShenandoahReentrantLock _ic_lock;
public:
ShenandoahNMethod(nmethod *nm, GrowableArray<oop*>& oops, bool has_non_immed_oops);
@ -51,6 +52,7 @@ public:
inline nmethod* nm() const;
inline ShenandoahReentrantLock* lock();
inline ShenandoahReentrantLock* ic_lock();
inline void oops_do(OopClosure* oops, bool fix_relocations = false);
// Update oops when the nmethod is re-registered
void update();
@ -59,6 +61,7 @@ public:
static ShenandoahNMethod* for_nmethod(nmethod* nm);
static inline ShenandoahReentrantLock* lock_for_nmethod(nmethod* nm);
static inline ShenandoahReentrantLock* ic_lock_for_nmethod(nmethod* nm);
static void heal_nmethod(nmethod* nm);
static inline void heal_nmethod_metadata(ShenandoahNMethod* nmethod_data);

@ -39,6 +39,10 @@ ShenandoahReentrantLock* ShenandoahNMethod::lock() {
return &_lock;
}
// Lock taken around compiled-IC and exception-cache updates for this nmethod.
// Kept separate from lock() (the oop-healing/disarm lock) — per this commit
// (JDK-8333005), holding a single lock for both paths could deadlock.
ShenandoahReentrantLock* ShenandoahNMethod::ic_lock() {
return &_ic_lock;
}
// Returns the _unregistered flag set elsewhere when this nmethod's GC data
// is retired; plain read — callers presumably tolerate races (TODO confirm).
bool ShenandoahNMethod::is_unregistered() const {
return _unregistered;
}
@ -85,6 +89,10 @@ ShenandoahReentrantLock* ShenandoahNMethod::lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->lock();
}
// Convenience accessor: IC lock of the ShenandoahNMethod data attached to nm.
// Used by ShenandoahCompiledICProtectionBehaviour below to guard IC patching.
ShenandoahReentrantLock* ShenandoahNMethod::ic_lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->ic_lock();
}
// True while the iteration reference count is non-zero, i.e. at least one
// iteration over the nmethod table has started and not yet finished.
bool ShenandoahNMethodTable::iteration_in_progress() const {
return _itr_cnt > 0;
}

@ -91,14 +91,14 @@ public:
class ShenandoahCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->lock();
return true;
}
virtual void unlock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->unlock();
}
@ -108,7 +108,7 @@ public:
return true;
}
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
return lock->owned_by_self();
}

@ -99,6 +99,10 @@ XReentrantLock* XNMethod::lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->lock();
}
// Convenience accessor: IC lock of the per-nmethod GC data for nm.
// Introduced so IC patching no longer contends with the main nmethod lock.
XReentrantLock* XNMethod::ic_lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->ic_lock();
}
void XNMethod::log_register(const nmethod* nm) {
LogTarget(Trace, gc, nmethod) log;
if (!log.is_enabled()) {
@ -295,15 +299,18 @@ public:
return;
}
XLocker<XReentrantLock> locker(XNMethod::lock_for_nmethod(nm));
{
XLocker<XReentrantLock> locker(XNMethod::lock_for_nmethod(nm));
if (XNMethod::is_armed(nm)) {
// Heal oops and arm phase invariantly
XNMethod::nmethod_oops_barrier(nm);
XNMethod::set_guard_value(nm, 0);
if (XNMethod::is_armed(nm)) {
// Heal oops and arm phase invariantly
XNMethod::nmethod_oops_barrier(nm);
XNMethod::set_guard_value(nm, 0);
}
}
// Clear compiled ICs and exception caches
XLocker<XReentrantLock> locker(XNMethod::ic_lock_for_nmethod(nm));
nm->unload_nmethod_caches(_unloading_occurred);
}

@ -58,6 +58,7 @@ public:
static void nmethods_do(NMethodClosure* cl);
static XReentrantLock* lock_for_nmethod(nmethod* nm);
static XReentrantLock* ic_lock_for_nmethod(nmethod* nm);
static void unlink(XWorkers* workers, bool unloading_occurred);
static void purge();

@ -66,6 +66,7 @@ bool XNMethodDataOops::has_non_immediates() const {
// Default-construct both locks (the new _ic_lock added by this commit) and
// start with no oops attached.
XNMethodData::XNMethodData() :
_lock(),
_ic_lock(),
_oops(nullptr) {}
XNMethodData::~XNMethodData() {
@ -76,6 +77,10 @@ XReentrantLock* XNMethodData::lock() {
return &_lock;
}
// Dedicated lock for compiled-IC/exception-cache updates, separate from
// lock() to break the deadlock described in JDK-8333005.
XReentrantLock* XNMethodData::ic_lock() {
return &_ic_lock;
}
// Acquire-load of the oops array pointer; presumably pairs with a releasing
// publish in swap_oops so readers see fully-initialized oops — TODO confirm.
XNMethodDataOops* XNMethodData::oops() const {
return Atomic::load_acquire(&_oops);
}

@ -56,6 +56,7 @@ public:
class XNMethodData : public CHeapObj<mtGC> {
private:
XReentrantLock _lock;
XReentrantLock _ic_lock;
XNMethodDataOops* volatile _oops;
public:
@ -63,6 +64,7 @@ public:
~XNMethodData();
XReentrantLock* lock();
XReentrantLock* ic_lock();
XNMethodDataOops* oops() const;
XNMethodDataOops* swap_oops(XNMethodDataOops* oops);

@ -87,13 +87,13 @@ public:
class XCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(nmethod* nm) {
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm);
lock->lock();
return true;
}
virtual void unlock(nmethod* nm) {
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm);
lock->unlock();
}
@ -102,7 +102,7 @@ public:
return true;
}
XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm);
XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm);
return lock->is_owned();
}
};

@ -104,6 +104,10 @@ ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->lock();
}
// Convenience accessor: IC lock of the per-nmethod GC data for nm.
// Used by ZCompiledICProtectionBehaviour to guard IC patching independently
// of the oop-healing lock.
ZReentrantLock* ZNMethod::ic_lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->ic_lock();
}
void ZNMethod::log_register(const nmethod* nm) {
LogTarget(Debug, gc, nmethod) log;
if (!log.is_enabled()) {
@ -351,31 +355,34 @@ public:
return;
}
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
{
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (ZNMethod::is_armed(nm)) {
const uintptr_t prev_color = ZNMethod::color(nm);
assert(prev_color != ZPointerStoreGoodMask, "Potentially non-monotonic transition");
if (ZNMethod::is_armed(nm)) {
const uintptr_t prev_color = ZNMethod::color(nm);
assert(prev_color != ZPointerStoreGoodMask, "Potentially non-monotonic transition");
// Heal oops and potentially mark young objects if there is a concurrent young collection.
ZUncoloredRootProcessOopClosure cl(prev_color);
ZNMethod::nmethod_oops_do_inner(nm, &cl);
// Heal oops and potentially mark young objects if there is a concurrent young collection.
ZUncoloredRootProcessOopClosure cl(prev_color);
ZNMethod::nmethod_oops_do_inner(nm, &cl);
// Disarm for marking and relocation, but leave the remset bits so this isn't store good.
// This makes sure the mutator still takes a slow path to fill in the nmethod epoch for
// the sweeper, to track continuations, if they exist in the system.
const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerMarkGoodMask | ZPointerRememberedMask);
// Disarm for marking and relocation, but leave the remset bits so this isn't store good.
// This makes sure the mutator still takes a slow path to fill in the nmethod epoch for
// the sweeper, to track continuations, if they exist in the system.
const zpointer new_disarm_value_ptr = ZAddress::color(zaddress::null, ZPointerMarkGoodMask | ZPointerRememberedMask);
// The new disarm value is mark good, and hence never store good. Therefore, this operation
// never completely disarms the nmethod. Therefore, we don't need to patch barriers yet
// via ZNMethod::nmethod_patch_barriers.
ZNMethod::set_guard_value(nm, (int)untype(new_disarm_value_ptr));
// The new disarm value is mark good, and hence never store good. Therefore, this operation
// never completely disarms the nmethod. Therefore, we don't need to patch barriers yet
// via ZNMethod::nmethod_patch_barriers.
ZNMethod::set_guard_value(nm, (int)untype(new_disarm_value_ptr));
log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by unlinking [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
assert(ZNMethod::is_armed(nm), "Must be considered armed");
log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by unlinking [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
assert(ZNMethod::is_armed(nm), "Must be considered armed");
}
}
// Clear compiled ICs and exception caches
ZLocker<ZReentrantLock> locker(ZNMethod::ic_lock_for_nmethod(nm));
nm->unload_nmethod_caches(_unloading_occurred);
}
};

@ -63,6 +63,7 @@ public:
static void nmethods_do(bool secondary, NMethodClosure* cl);
static ZReentrantLock* lock_for_nmethod(nmethod* nm);
static ZReentrantLock* ic_lock_for_nmethod(nmethod* nm);
static void unlink(ZWorkers* workers, bool unloading_occurred);
static void purge();

@ -28,6 +28,7 @@
// Default-construct both locks (including the new _ic_lock from this commit),
// empty barrier/immediate-oop arrays, and no non-immediate oops recorded.
ZNMethodData::ZNMethodData()
: _lock(),
_ic_lock(),
_barriers(),
_immediate_oops(),
_has_non_immediate_oops(false) {}
@ -36,6 +37,10 @@ ZReentrantLock* ZNMethodData::lock() {
return &_lock;
}
// Dedicated lock for compiled-IC/exception-cache updates; separate from
// lock() (which guards barriers(), per the assert below in barriers()).
ZReentrantLock* ZNMethodData::ic_lock() {
return &_ic_lock;
}
const ZArray<ZNMethodDataBarrier>* ZNMethodData::barriers() const {
assert(_lock.is_owned(), "Should be owned");
return &_barriers;

@ -38,6 +38,7 @@ struct ZNMethodDataBarrier {
class ZNMethodData : public CHeapObj<mtGC> {
private:
ZReentrantLock _lock;
ZReentrantLock _ic_lock;
ZArray<ZNMethodDataBarrier> _barriers;
ZArray<oop*> _immediate_oops;
bool _has_non_immediate_oops;
@ -46,6 +47,7 @@ public:
ZNMethodData();
ZReentrantLock* lock();
ZReentrantLock* ic_lock();
const ZArray<ZNMethodDataBarrier>* barriers() const;
const ZArray<oop*>* immediate_oops() const;

@ -90,13 +90,13 @@ public:
class ZCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(nmethod* nm) {
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
ZReentrantLock* const lock = ZNMethod::ic_lock_for_nmethod(nm);
lock->lock();
return true;
}
virtual void unlock(nmethod* nm) {
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
ZReentrantLock* const lock = ZNMethod::ic_lock_for_nmethod(nm);
lock->unlock();
}
@ -105,7 +105,7 @@ public:
return true;
}
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
ZReentrantLock* const lock = ZNMethod::ic_lock_for_nmethod(nm);
return lock->is_owned();
}
};