8212989: Allow CompiledMethod ExceptionCache have unloaded klasses

Reviewed-by: kvn, pliden
Author: Erik Österlund
Date:   2018-11-05 08:01:39 +01:00
Commit: f2fe71b186 (parent: fdb010502b)

6 changed files with 118 additions and 23 deletions

src/hotspot/share/code/codeCache.cpp

@@ -145,6 +145,7 @@ address CodeCache::_low_bound = 0;
 address CodeCache::_high_bound = 0;
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
+ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
 
 // Initialize arrays of CodeHeap subsets
 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
@@ -895,6 +896,34 @@ void CodeCache::verify_icholder_relocations() {
 #endif
 }
 
+// Defer freeing of concurrently cleaned ExceptionCache entries until
+// after a global handshake operation.
+void CodeCache::release_exception_cache(ExceptionCache* entry) {
+  if (SafepointSynchronize::is_at_safepoint()) {
+    delete entry;
+  } else {
+    for (;;) {
+      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
+      entry->set_purge_list_next(purge_list_head);
+      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
+        break;
+      }
+    }
+  }
+}
+
+// Delete exception caches that have been concurrently unlinked,
+// followed by a global handshake operation.
+void CodeCache::purge_exception_caches() {
+  ExceptionCache* curr = _exception_cache_purge_list;
+  while (curr != NULL) {
+    ExceptionCache* next = curr->purge_list_next();
+    delete curr;
+    curr = next;
+  }
+  _exception_cache_purge_list = NULL;
+}
+
 void CodeCache::gc_prologue() { }
 
 void CodeCache::gc_epilogue() {
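
Taken together, the two functions above split deallocation into an unlink phase and a free phase. A minimal sketch of the intended ordering, assuming a hypothetical GC driver (both helper calls below are made-up placeholders for the VM's real phase machinery):

    // Hedged sketch of a concurrent cleaning cycle; not HotSpot code.
    void hypothetical_cleaning_cycle() {
      // 1. Concurrent phase: cleaners unlink dead ExceptionCache entries.
      //    Each unlinked entry goes through CodeCache::release_exception_cache(),
      //    which pushes it onto the purge list instead of freeing it, because
      //    lock-free readers may still be traversing it.
      unlink_dead_exception_cache_entries();   // hypothetical placeholder
      // 2. Global handshake: after this, no Java thread can still hold a
      //    pointer to an entry unlinked in step 1.
      run_handshake_with_all_java_threads();   // hypothetical placeholder
      // 3. Only now is it safe to free the unlinked entries.
      CodeCache::purge_exception_caches();
    }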

src/hotspot/share/code/codeCache.hpp

@@ -72,8 +72,9 @@
 // existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
 // Solaris and BSD.
 
-class OopClosure;
+class ExceptionCache;
 class KlassDepChange;
+class OopClosure;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -94,6 +95,8 @@ class CodeCache : AllStatic {
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
   static uint8_t _unloading_cycle;          // Global state for recognizing old nmethods that need to be unloaded
 
+  static ExceptionCache* volatile _exception_cache_purge_list;
+
   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
@@ -192,6 +195,8 @@ class CodeCache : AllStatic {
   static uint16_t unloading_cycle() { return _unloading_cycle; }
   static void increment_unloading_cycle();
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
+  static void release_exception_cache(ExceptionCache* entry);
+  static void purge_exception_caches();
 
   // Apply f to every live code blob in scavengable nmethods. Prune nmethods
   // from the list of scavengable nmethods if f->fix_relocations() and a nmethod

src/hotspot/share/code/compiledMethod.cpp

@@ -103,34 +103,84 @@ const char* CompiledMethod::state() const {
 //-----------------------------------------------------------------------------
 
+ExceptionCache* CompiledMethod::exception_cache_acquire() const {
+  return OrderAccess::load_acquire(&_exception_cache);
+}
+
 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
   assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
   assert(new_entry != NULL,"Must be non null");
   assert(new_entry->next() == NULL, "Must be null");
 
-  ExceptionCache *ec = exception_cache();
-  if (ec != NULL) {
-    new_entry->set_next(ec);
+  for (;;) {
+    ExceptionCache *ec = exception_cache();
+    if (ec != NULL) {
+      Klass* ex_klass = ec->exception_type();
+      if (!ex_klass->is_loader_alive()) {
+        // We must guarantee that entries are not inserted with new next pointer
+        // edges to ExceptionCache entries with dead klasses, due to bad interactions
+        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
+        // the head pointer forward to the first live ExceptionCache, so that the new
+        // next pointers always point at live ExceptionCaches that are not removed due
+        // to concurrent ExceptionCache cleanup.
+        ExceptionCache* next = ec->next();
+        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
+          CodeCache::release_exception_cache(ec);
+        }
+        continue;
+      }
+      ec = exception_cache();
+      if (ec != NULL) {
+        new_entry->set_next(ec);
+      }
+    }
+    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
+      return;
+    }
   }
-  release_set_exception_cache(new_entry);
 }
 
 void CompiledMethod::clean_exception_cache() {
+  // For each nmethod, only a single thread may call this cleanup function
+  // at the same time, whether called in STW cleanup or concurrent cleanup.
+  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
+  // then a single writer may contend with cleaning up the head pointer to the
+  // first ExceptionCache node that has a Klass* that is alive. That is fine,
+  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
+  // And the concurrent writers do not clean up next pointers, only the head.
+  // Also note that concurrent readers will walk through Klass* pointers that are not
+  // alive. That does not cause ABA problems, because Klass* is deleted after
+  // a handshake with all threads, after all stale ExceptionCaches have been
+  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
+  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
+  // That similarly implies that CAS operations on ExceptionCache entries do not
+  // suffer from ABA problems, as unlinking and deletion are separated by a global
+  // handshake operation.
   ExceptionCache* prev = NULL;
-  ExceptionCache* curr = exception_cache();
+  ExceptionCache* curr = exception_cache_acquire();
 
   while (curr != NULL) {
     ExceptionCache* next = curr->next();
 
-    Klass* ex_klass = curr->exception_type();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
+    if (!curr->exception_type()->is_loader_alive()) {
       if (prev == NULL) {
-        set_exception_cache(next);
+        // Try to clean the head; this is contended by concurrent inserts, which
+        // both lazily clean the head and insert entries at the head. If
+        // the CAS fails, the operation is restarted.
+        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
+          prev = NULL;
+          curr = exception_cache_acquire();
+          continue;
+        }
      } else {
+        // It is impossible, during cleanup, to connect the next pointer to
+        // an ExceptionCache that has not been published before a safepoint
+        // prior to the cleanup. Therefore, release is not required.
         prev->set_next(next);
       }
-      delete curr;
       // prev stays the same.
+      CodeCache::release_exception_cache(curr);
     } else {
       prev = curr;
     }
@@ -145,7 +195,7 @@ address CompiledMethod::handler_for_exception_and_pc(Handle exception, address p
   // We never grab a lock to read the exception cache, so we may
   // have false negatives. This is okay, as it can only happen during
   // the first few exception lookups for a given nmethod.
-  ExceptionCache* ec = exception_cache();
+  ExceptionCache* ec = exception_cache_acquire();
   while (ec != NULL) {
     address ret_val;
     if ((ret_val = ec->match(exception,pc)) != NULL) {
@@ -172,13 +222,11 @@ void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address
   }
 }
 
-//-------------end of code for ExceptionCache--------------
-
 // private method for handling exception cache
 // These methods are private, and used to manipulate the exception cache
 // directly.
 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
-  ExceptionCache* ec = exception_cache();
+  ExceptionCache* ec = exception_cache_acquire();
   while (ec != NULL) {
     if (ec->match_exception_with_space(exception)) {
       return ec;
@@ -188,6 +236,8 @@ ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle excep
   return NULL;
 }
 
+//-------------end of code for ExceptionCache--------------
+
 bool CompiledMethod::is_at_poll_return(address pc) {
   RelocIterator iter(this, pc, pc+1);
   while (iter.next()) {
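
Since the interplay between inserts and cleaning is the subtle part of this change, here is a self-contained model of the head discipline (a sketch only: std::atomic stands in for HotSpot's Atomic/OrderAccess, a bool stands in for the Klass liveness test, and it omits the ExceptionCache_lock that serializes inserters as well as the purge list that defers freeing):

    #include <atomic>

    // Model: inserters roll the head past a dead front node before linking,
    // so a new node's next pointer never ends up pointing at a node that the
    // concurrent cleaner is about to unlink.
    struct Node {
      std::atomic<bool>  dead{false};   // stands in for !klass->is_loader_alive()
      std::atomic<Node*> next{nullptr};
    };

    std::atomic<Node*> list_head{nullptr};

    void insert(Node* n) {
      for (;;) {
        Node* head = list_head.load(std::memory_order_acquire);
        if (head != nullptr && head->dead.load(std::memory_order_relaxed)) {
          // Lazily unlink the dead head; in HotSpot the CAS winner hands the
          // node to CodeCache::release_exception_cache() instead of freeing it.
          Node* next = head->next.load(std::memory_order_relaxed);
          list_head.compare_exchange_strong(head, next);
          continue;  // re-examine the (possibly new) head
        }
        n->next.store(head, std::memory_order_relaxed);
        // Publish with release so a reader that acquires the head pointer
        // also observes n's initialized fields.
        if (list_head.compare_exchange_strong(head, n,
                                              std::memory_order_release,
                                              std::memory_order_relaxed)) {
          return;
        }
      }
    }

The interior unlink in clean_exception_cache() needs no CAS in this scheme because only one cleaner runs per nmethod at a time; only the head is contended between the cleaner and the inserters, which is why only the head update is a CAS there.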

src/hotspot/share/code/compiledMethod.hpp

@@ -48,7 +48,8 @@ class ExceptionCache : public CHeapObj<mtCode> {
   address _pc[cache_size];
   address _handler[cache_size];
   volatile int _count;
-  ExceptionCache* _next;
+  ExceptionCache* volatile _next;
+  ExceptionCache* _purge_list_next;
 
   inline address pc_at(int index);
   void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
@@ -65,8 +66,10 @@ class ExceptionCache : public CHeapObj<mtCode> {
   ExceptionCache(Handle exception, address pc, address handler);
 
   Klass* exception_type() { return _exception_type; }
-  ExceptionCache* next() { return _next; }
-  void set_next(ExceptionCache *ec) { _next = ec; }
+  ExceptionCache* next();
+  void set_next(ExceptionCache *ec);
+  ExceptionCache* purge_list_next() { return _purge_list_next; }
+  void set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }
 
   address match(Handle exception, address pc);
   bool match_exception_with_space(Handle exception) ;
@@ -293,11 +296,14 @@ public:
   virtual Metadata** metadata_addr_at(int index) const = 0;
   virtual void set_original_pc(const frame* fr, address pc) = 0;
 
+protected:
   // Exception cache support
-  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
+  // Note: _exception_cache may be read and cleaned concurrently.
   ExceptionCache* exception_cache() const { return _exception_cache; }
+  ExceptionCache* exception_cache_acquire() const;
   void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
+  void release_set_exception_cache(ExceptionCache *ec);
 
+public:
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
   void clean_exception_cache();
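
The protected accessors encode the usual publication pairing: a writer fully initializes an ExceptionCache and then publishes it with at least release semantics (HotSpot's default Atomic::cmpxchg is conservatively fenced, so the insert CAS qualifies), while lock-free readers go through exception_cache_acquire() so the entry's fields are visible before the pointer is dereferenced. A minimal sketch of the pairing, with std::atomic standing in for OrderAccess and a placeholder Entry type:

    #include <atomic>

    struct Entry { int payload; };   // placeholder for ExceptionCache

    std::atomic<Entry*> head{nullptr};

    void publish(Entry* e) {
      e->payload = 42;                                  // initialize fully first
      head.store(e, std::memory_order_release);         // then publish
    }

    int read() {
      Entry* e = head.load(std::memory_order_acquire);  // pairs with the release
      return e != nullptr ? e->payload : -1;            // fields are visible here
    }

The plain exception_cache()/set_exception_cache() pair remains for contexts that are already serialized, for example code running at a safepoint.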

src/hotspot/share/code/compiledMethod.inline.hpp

@@ -41,10 +41,6 @@ inline bool CompiledMethod::is_deopt_entry(address pc) {
     ;
 }
 
-inline void CompiledMethod::release_set_exception_cache(ExceptionCache *ec) {
-  OrderAccess::release_store(&_exception_cache, ec);
-}
-
 // -----------------------------------------------------------------------------
 // CompiledMethod::get_deopt_original_pc
 //

src/hotspot/share/code/nmethod.cpp

@@ -244,6 +244,7 @@ ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
   _count = 0;
   _exception_type = exception->klass();
   _next = NULL;
+  _purge_list_next = NULL;
 
   add_address_and_handler(pc,handler);
 }
@@ -293,6 +294,14 @@ bool ExceptionCache::add_address_and_handler(address addr, address handler) {
   return false;
 }
 
+ExceptionCache* ExceptionCache::next() {
+  return Atomic::load(&_next);
+}
+
+void ExceptionCache::set_next(ExceptionCache *ec) {
+  Atomic::store(ec, &_next);
+}
+
 //-----------------------------------------------------------------------------
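
A closing note on these definitions: _next is now read by lock-free readers racing with the cleaner's set_next(), so the accesses go through Atomic::load/Atomic::store to keep the race well-defined rather than relying on plain volatile accesses; no acquire/release ordering is needed on this edge because an unlinked entry stays allocated until the purge that follows the global handshake. Condensed into a for loop, the readers in compiledMethod.cpp traverse the list like this:

    // Equivalent shape of the lookup loops shown above: acquire the head,
    // then follow Atomic next loads; entries unlinked mid-walk stay valid
    // until the post-handshake purge.
    for (ExceptionCache* ec = exception_cache_acquire(); ec != NULL; ec = ec->next()) {
      if (ec->match_exception_with_space(exception)) {
        return ec;
      }
    }
    return NULL;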