8224674: NMethod state machine is not monotonic

Reviewed-by: dlong, coleenp, thartmann
This commit is contained in:
Erik Österlund 2019-07-18 11:15:20 +02:00
parent e23d02b484
commit 03030e473c
5 changed files with 66 additions and 23 deletions

View File

@@ -168,7 +168,7 @@ private:
int state() const { return *_state_adr; }
// Non-virtual for speed
bool _is_alive() const { return state() < zombie; }
bool _is_alive() const { return state() < unloaded; }
virtual bool is_zombie() const { return state() == zombie; }
virtual bool is_unloaded() const { return state() == unloaded; }

View File

@@ -208,9 +208,9 @@ public:
not_used = 1, // not entrant, but revivable
not_entrant = 2, // marked for deoptimization but activations may still exist,
// will be transformed to zombie when all activations are gone
zombie = 3, // no activations exist, nmethod is ready for purge
unloaded = 4 // there should be no activations, should not be called,
// will be transformed to zombie immediately
unloaded = 3, // there should be no activations, should not be called, will be
// transformed to zombie by the sweeper, when not "locked in vm".
zombie = 4 // no activations exist, nmethod is ready for purge
};
virtual bool is_in_use() const = 0;

View File

@@ -1136,6 +1136,20 @@ void nmethod::inc_decompile_count() {
mdo->inc_decompile_count();
}
// Atomically advances _state to new_state, but only in the increasing
// direction, making the nmethod state machine monotonic. Returns true if
// this thread performed the transition, false if _state was already at or
// beyond new_state (i.e. another thread made the nmethod at least as dead).
bool nmethod::try_transition(int new_state_int) {
signed char new_state = new_state_int;
// Classic CAS retry loop: reload on contention until we either observe a
// state >= new_state (give up) or successfully install new_state.
for (;;) {
signed char old_state = Atomic::load(&_state);
if (old_state >= new_state) {
// Ensure monotonicity of transitions.
return false;
}
if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
// CAS succeeded: this thread owns the transition to new_state.
return true;
}
}
}
void nmethod::make_unloaded() {
post_compiled_method_unload();
@@ -1159,7 +1173,9 @@ void nmethod::make_unloaded() {
}
// Unlink the osr method, so we do not look this up again
if (is_osr_method()) {
// Invalidate the osr nmethod only once
// Invalidate the osr nmethod only once. Note that with concurrent
// code cache unloading, OSR nmethods are invalidated before they
// are made unloaded. Therefore, this becomes a no-op then.
if (is_in_use()) {
invalidate_osr_method();
}
@@ -1213,12 +1229,14 @@ void nmethod::make_unloaded() {
set_osr_link(NULL);
NMethodSweeper::report_state_change(this);
// The release is only needed for compile-time ordering, as accesses
// into the nmethod after the store are not safe due to the sweeper
// being allowed to free it when the store is observed, during
// concurrent nmethod unloading. Therefore, there is no need for
// acquire on the loader side.
OrderAccess::release_store(&_state, (signed char)unloaded);
bool transition_success = try_transition(unloaded);
// It is an important invariant that there exists no race between
// the sweeper and GC thread competing for making the same nmethod
// zombie and unloaded respectively. This is ensured by
// can_convert_to_zombie() returning false for any is_unloading()
// nmethod, informing the sweeper not to step on any GC toes.
assert(transition_success, "Invalid nmethod transition to unloaded");
#if INCLUDE_JVMCI
// Clear the link between this nmethod and a HotSpotNmethod mirror
@@ -1283,7 +1301,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
assert(!is_zombie(), "should not already be a zombie");
if (_state == state) {
if (Atomic::load(&_state) >= state) {
// Avoid taking the lock if already in required state.
// This is safe from races because the state is an end-state,
// which the nmethod cannot back out of once entered.
@@ -1318,7 +1336,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
// Enter critical section. Does not block for safepoint.
MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (_state == state) {
if (Atomic::load(&_state) >= state) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
@@ -1354,7 +1372,18 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
}
// Change state
_state = state;
if (!try_transition(state)) {
// If the transition fails, it is due to another thread making the nmethod more
// dead. In particular, one thread might be making the nmethod unloaded concurrently.
// If so, having patched in the jump in the verified entry unnecessarily is fine.
// The nmethod is no longer possible to call by Java threads.
// Incrementing the decompile count is also fine as the caller of make_not_entrant()
// had a valid reason to deoptimize the nmethod.
// Marking the nmethod as seen on stack also has no effect, as the nmethod is now
// !is_alive(), and the seen on stack value is only used to convert not_entrant
// nmethods to zombie in can_convert_to_zombie().
return false;
}
// Log the transition once
log_state_change();

View File

@@ -212,6 +212,9 @@ class nmethod : public CompiledMethod {
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
bool try_transition(int new_state);
// Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition.
bool make_not_entrant_or_zombie(int state);
@@ -339,7 +342,7 @@ class nmethod : public CompiledMethod {
// flag accessing and manipulation
bool is_not_installed() const { return _state == not_installed; }
bool is_in_use() const { return _state <= in_use; }
bool is_alive() const { return _state < zombie; }
bool is_alive() const { return _state < unloaded; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }

View File

@@ -261,6 +261,24 @@ private:
Atomic::store(true, &_failed);
}
// Unlinks nm from its dependency contexts, from its Method, and (for OSR
// nmethods) invalidates it — all before the unlink/purge handshake, so the
// purge phase only has to free the nmethod.
void unlink(nmethod* nm) {
// Unlinking of the dependencies must happen before the
// handshake separating unlink and purge.
nm->flush_dependencies(false /* delete_immediately */);
// We don't need to take the lock when unlinking nmethods from
// the Method, because it is only concurrently unlinked by
// the entry barrier, which acquires the per nmethod lock.
nm->unlink_from_method(false /* acquire_lock */);
if (nm->is_osr_method()) {
// Invalidate the osr nmethod before the handshake. The nmethod
// will be made unloaded after the handshake. Then invalidate_osr_method()
// will be called again, which will be a no-op.
nm->invalidate_osr_method();
}
}
public:
ZNMethodUnlinkClosure(bool unloading_occurred) :
_unloading_occurred(unloading_occurred),
@@ -278,14 +296,7 @@ public:
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (nm->is_unloading()) {
// Unlinking of the dependencies must happen before the
// handshake separating unlink and purge.
nm->flush_dependencies(false /* delete_immediately */);
// We don't need to take the lock when unlinking nmethods from
// the Method, because it is only concurrently unlinked by
// the entry barrier, which acquires the per nmethod lock.
nm->unlink_from_method(false /* acquire_lock */);
unlink(nm);
return;
}