8313332: Simplify lazy jmethodID cache in InstanceKlass
Reviewed-by: amenkov, sspitsyn, dcubed
commit 21867c929a
parent b9da14012d
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,6 +104,7 @@ class outputStream;
   LOG_TAG(itables) \
   LOG_TAG(jfr) \
   LOG_TAG(jit) \
+  LOG_TAG(jmethod) \
   LOG_TAG(jni) \
   LOG_TAG(jvmci) \
   LOG_TAG(jvmti) \
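Note: the hunk above adds a new Unified Logging tag, jmethod. It is used by the log_debug(jmethod)(...) call added to Method::make_jmethod_id() later in this patch. Assuming the usual -Xlog tag-selection syntax (an assumption about usage, not part of this change), the new output could be enabled with, for example:

    java -Xlog:jmethod=debug -version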
@@ -2267,16 +2267,26 @@ void InstanceKlass::set_enclosing_method_indices(u2 class_index,
   }
 }
 
+jmethodID InstanceKlass::update_jmethod_id(jmethodID* jmeths, Method* method, int idnum) {
+  if (method->is_old() && !method->is_obsolete()) {
+    // If the method passed in is old (but not obsolete), use the current version.
+    method = method_with_idnum((int)idnum);
+    assert(method != nullptr, "old and but not obsolete, so should exist");
+  }
+  jmethodID new_id = Method::make_jmethod_id(class_loader_data(), method);
+  Atomic::release_store(&jmeths[idnum + 1], new_id);
+  return new_id;
+}
+
 // Lookup or create a jmethodID.
 // This code is called by the VMThread and JavaThreads so the
 // locking has to be done very carefully to avoid deadlocks
 // and/or other cache consistency problems.
 //
 jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
-  size_t idnum = (size_t)method_h->method_idnum();
+  Method* method = method_h();
+  int idnum = method->method_idnum();
   jmethodID* jmeths = methods_jmethod_ids_acquire();
-  size_t length = 0;
-  jmethodID id = nullptr;
 
   // We use a double-check locking idiom here because this cache is
   // performance sensitive. In the normal system, this cache only
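Note on the new helper above: InstanceKlass::update_jmethod_id() creates the jmethodID and publishes it into its cache slot with Atomic::release_store, so a reader that acquire-loads the same slot can never observe a partially constructed value. A minimal stand-alone analogue in portable C++ (a sketch with invented names, not HotSpot code):

    #include <atomic>
    #include <cassert>

    struct Id { int value; };           // stand-in for what a jmethodID refers to

    std::atomic<Id*> slot{nullptr};     // one cache slot

    void publish(Id* fresh) {
      // Release store: the initialization of *fresh happens-before any
      // acquire load that observes this pointer.
      slot.store(fresh, std::memory_order_release);
    }

    Id* lookup() {
      // Acquire load pairs with the release store in publish().
      return slot.load(std::memory_order_acquire);
    }

    int main() {
      static Id the_id{42};
      publish(&the_id);
      Id* seen = lookup();
      assert(seen == nullptr || seen->value == 42);
      return 0;
    }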
@@ -2288,82 +2298,65 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) {
   // seen either. Cache reads of existing jmethodIDs proceed without a
   // lock, but cache writes of a new jmethodID requires uniqueness and
   // creation of the cache itself requires no leaks so a lock is
-  // generally acquired in those two cases.
+  // acquired in those two cases.
   //
-  // If the RedefineClasses() API has been used, then this cache can
-  // grow and we'll have transitions from non-null to bigger non-null.
-  // Cache creation requires no leaks and we require safety between all
-  // cache accesses and freeing of the old cache so a lock is generally
-  // acquired when the RedefineClasses() API has been used.
+  // If the RedefineClasses() API has been used, then this cache grows
+  // in the redefinition safepoint.
 
-  if (jmeths != nullptr) {
-    // the cache already exists
-    if (!idnum_can_increment()) {
-      // the cache can't grow so we can just get the current values
-      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
-    } else {
-      MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
-      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
+  if (jmeths == nullptr) {
+    MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
+    jmeths = methods_jmethod_ids_acquire();
+    // Still null?
+    if (jmeths == nullptr) {
+      size_t size = idnum_allocated_count();
+      assert(size > (size_t)idnum, "should already have space");
+      jmeths = NEW_C_HEAP_ARRAY(jmethodID, size + 1, mtClass);
+      memset(jmeths, 0, (size + 1) * sizeof(jmethodID));
+      // cache size is stored in element[0], other elements offset by one
+      jmeths[0] = (jmethodID)size;
+      jmethodID new_id = update_jmethod_id(jmeths, method, idnum);
+
+      // publish jmeths
+      release_set_methods_jmethod_ids(jmeths);
+      return new_id;
     }
   }
-  // implied else:
-  // we need to allocate a cache so default length and id values are good
-
-  if (jmeths == nullptr ||   // no cache yet
-      length <= idnum ||     // cache is too short
-      id == nullptr) {       // cache doesn't contain entry
-
-    // This function can be called by the VMThread or GC worker threads so we
-    // have to do all things that might block on a safepoint before grabbing the lock.
-    // Otherwise, we can deadlock with the VMThread or have a cache
-    // consistency issue. These vars keep track of what we might have
-    // to free after the lock is dropped.
-    jmethodID to_dealloc_id = nullptr;
-    jmethodID* to_dealloc_jmeths = nullptr;
-
-    // may not allocate new_jmeths or use it if we allocate it
-    jmethodID* new_jmeths = nullptr;
-    if (length <= idnum) {
-      // allocate a new cache that might be used
-      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
-      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1, mtClass);
-      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
-      // cache size is stored in element[0], other elements offset by one
-      new_jmeths[0] = (jmethodID)size;
-    }
-
-    // allocate a new jmethodID that might be used
-    {
-      MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
-      jmethodID new_id = nullptr;
-      if (method_h->is_old() && !method_h->is_obsolete()) {
-        // The method passed in is old (but not obsolete), we need to use the current version
-        Method* current_method = method_with_idnum((int)idnum);
-        assert(current_method != nullptr, "old and but not obsolete, so should exist");
-        new_id = Method::make_jmethod_id(class_loader_data(), current_method);
-      } else {
-        // It is the current version of the method or an obsolete method,
-        // use the version passed in
-        new_id = Method::make_jmethod_id(class_loader_data(), method_h());
-      }
-
-      id = get_jmethod_id_fetch_or_update(idnum, new_id, new_jmeths,
-                                          &to_dealloc_id, &to_dealloc_jmeths);
-    }
-
-    // The lock has been dropped so we can free resources.
-    // Free up either the old cache or the new cache if we allocated one.
-    if (to_dealloc_jmeths != nullptr) {
-      FreeHeap(to_dealloc_jmeths);
-    }
-    // free up the new ID since it wasn't needed
-    if (to_dealloc_id != nullptr) {
-      Method::destroy_jmethod_id(class_loader_data(), to_dealloc_id);
+
+  jmethodID id = Atomic::load_acquire(&jmeths[idnum + 1]);
+  if (id == nullptr) {
+    MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
+    id = jmeths[idnum + 1];
+    // Still null?
+    if (id == nullptr) {
+      return update_jmethod_id(jmeths, method, idnum);
     }
   }
   return id;
 }
 
+void InstanceKlass::update_methods_jmethod_cache() {
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  jmethodID* cache = _methods_jmethod_ids;
+  if (cache != nullptr) {
+    size_t size = idnum_allocated_count();
+    size_t old_size = (size_t)cache[0];
+    if (old_size < size + 1) {
+      // Allocate a larger one and copy entries to the new one.
+      // They've already been updated to point to new methods where applicable (i.e., not obsolete).
+      jmethodID* new_cache = NEW_C_HEAP_ARRAY(jmethodID, size + 1, mtClass);
+      memset(new_cache, 0, (size + 1) * sizeof(jmethodID));
+      // The cache size is stored in element[0]; the other elements are offset by one.
+      new_cache[0] = (jmethodID)size;
+
+      for (int i = 1; i <= (int)old_size; i++) {
+        new_cache[i] = cache[i];
+      }
+      _methods_jmethod_ids = new_cache;
+      FREE_C_HEAP_ARRAY(jmethodID, cache);
+    }
+  }
+}
+
 // Figure out how many jmethodIDs haven't been allocated, and make
 // sure space for them is pre-allocated. This makes getting all
 // method ids much, much faster with classes with more than 8
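Taken together, the rewritten get_jmethod_id() above is a two-level double-checked locking scheme: the cache array is created at most once under JmethodIdCreation_lock and published with a release store, each slot is filled at most once under the same lock, and readers only perform acquire loads. The sketch below is a compact stand-alone analogue in portable C++ (std::mutex/std::atomic, invented names, fixed capacity); it mirrors the shape of the code, not the HotSpot implementation itself:

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <mutex>

    class LazyIdCache {
     public:
      explicit LazyIdCache(size_t capacity) : _capacity(capacity) {}

      // Look up or create the id for idnum; the fast path is lock-free.
      void* get_id(size_t idnum) {
        assert(idnum < _capacity);                     // cache is pre-sized
        Slot* slots = _slots.load(std::memory_order_acquire);
        if (slots == nullptr) {
          std::lock_guard<std::mutex> lock(_mutex);
          slots = _slots.load(std::memory_order_acquire);
          if (slots == nullptr) {                      // still null: create the cache
            slots = new Slot[_capacity];
            for (size_t i = 0; i < _capacity; i++) {
              slots[i].store(nullptr, std::memory_order_relaxed);
            }
            void* id = make_id(idnum);
            slots[idnum].store(id, std::memory_order_release);
            _slots.store(slots, std::memory_order_release);  // publish the cache
            return id;
          }
        }
        void* id = slots[idnum].load(std::memory_order_acquire);
        if (id == nullptr) {
          std::lock_guard<std::mutex> lock(_mutex);
          id = slots[idnum].load(std::memory_order_relaxed); // re-check under the lock
          if (id == nullptr) {                         // still null: create the id
            id = make_id(idnum);
            slots[idnum].store(id, std::memory_order_release);
          }
        }
        return id;
      }

     private:
      using Slot = std::atomic<void*>;

      static void* make_id(size_t idnum) {
        // Stand-in for Method::make_jmethod_id(): allocate something unique once.
        return new size_t(idnum);
      }

      const size_t _capacity;              // like idnum_allocated_count(): known up front
      std::atomic<Slot*> _slots{nullptr};  // like _methods_jmethod_ids
      std::mutex _mutex;                   // like JmethodIdCreation_lock
      // Cleanup of the slots and ids is omitted for brevity.
    };

The property mirrored from the patch is that an ID is created at most once, and only under the lock, while the common lookup stays lock-free; the old code instead created a speculative jmethodID first and freed the loser of the race afterwards, which is what the removed to_dealloc_* bookkeeping was for.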
@@ -2384,88 +2377,11 @@ void InstanceKlass::ensure_space_for_methodids(int start_offset) {
   }
 }
 
-// Common code to fetch the jmethodID from the cache or update the
-// cache with the new jmethodID. This function should never do anything
-// that causes the caller to go to a safepoint or we can deadlock with
-// the VMThread or have cache consistency issues.
-//
-jmethodID InstanceKlass::get_jmethod_id_fetch_or_update(
-            size_t idnum, jmethodID new_id,
-            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
-            jmethodID** to_dealloc_jmeths_p) {
-  assert(new_id != nullptr, "sanity check");
-  assert(to_dealloc_id_p != nullptr, "sanity check");
-  assert(to_dealloc_jmeths_p != nullptr, "sanity check");
-  assert(JmethodIdCreation_lock->owned_by_self(), "sanity check");
-
-  // reacquire the cache - we are locked, single threaded or at a safepoint
-  jmethodID* jmeths = methods_jmethod_ids_acquire();
-  jmethodID id = nullptr;
-  size_t length = 0;
-
-  if (jmeths == nullptr ||                      // no cache yet
-      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
-    if (jmeths != nullptr) {
-      // copy any existing entries from the old cache
-      for (size_t index = 0; index < length; index++) {
-        new_jmeths[index+1] = jmeths[index+1];
-      }
-      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
-    }
-    release_set_methods_jmethod_ids(jmeths = new_jmeths);
-  } else {
-    // fetch jmethodID (if any) from the existing cache
-    id = jmeths[idnum+1];
-    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
-  }
-  if (id == nullptr) {
-    // No matching jmethodID in the existing cache or we have a new
-    // cache or we just grew the cache. This cache write is done here
-    // by the first thread to win the foot race because a jmethodID
-    // needs to be unique once it is generally available.
-    id = new_id;
-
-    // The jmethodID cache can be read while unlocked so we have to
-    // make sure the new jmethodID is complete before installing it
-    // in the cache.
-    Atomic::release_store(&jmeths[idnum+1], id);
-  } else {
-    *to_dealloc_id_p = new_id;  // save new id for later delete
-  }
-  return id;
-}
-
-
-// Common code to get the jmethodID cache length and the jmethodID
-// value at index idnum if there is one.
-//
-void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache,
-       size_t idnum, size_t *length_p, jmethodID* id_p) {
-  assert(cache != nullptr, "sanity check");
-  assert(length_p != nullptr, "sanity check");
-  assert(id_p != nullptr, "sanity check");
-
-  // cache size is stored in element[0], other elements offset by one
-  *length_p = (size_t)cache[0];
-  if (*length_p <= idnum) {  // cache is too short
-    *id_p = nullptr;
-  } else {
-    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
-  }
-}
-
-
 // Lookup a jmethodID, null if not found. Do no blocking, no allocations, no handles
 jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
-  size_t idnum = (size_t)method->method_idnum();
+  int idnum = method->method_idnum();
   jmethodID* jmeths = methods_jmethod_ids_acquire();
-  size_t length;                                // length assigned as debugging crumb
-  jmethodID id = nullptr;
-  if (jmeths != nullptr &&                      // If there is a cache
-      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
-    id = jmeths[idnum+1];                       // Look up the id (may be null)
-  }
-  return id;
+  return (jmeths != nullptr) ? jmeths[idnum + 1] : nullptr;
 }
 
 inline DependencyContext InstanceKlass::dependencies() {
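With the helpers above removed, jmethod_id_or_null() can drop the length check entirely: ensure_space_for_methodids() pre-sizes the cache for every idnum (see the "should already have space" assert earlier in this patch), and the cache only grows inside the redefinition safepoint, so a reader only has to check that the cache exists. A minimal analogue of that lock-free lookup in portable C++ (illustrative names, and without the length-in-element[0] convention of the real cache):

    #include <atomic>
    #include <cstddef>

    using Slot = std::atomic<void*>;

    // Pre-sized array of slots, published through an atomic pointer.
    void* id_or_null(const std::atomic<Slot*>& slots, size_t idnum) {
      Slot* s = slots.load(std::memory_order_acquire);
      return (s != nullptr) ? s[idnum].load(std::memory_order_acquire) : nullptr;
    }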
@@ -2884,7 +2800,7 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) {
   set_jni_ids(nullptr);
 
   jmethodID* jmeths = methods_jmethod_ids_acquire();
-  if (jmeths != (jmethodID*)nullptr) {
+  if (jmeths != nullptr) {
     release_set_methods_jmethod_ids(nullptr);
     FreeHeap(jmeths);
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -237,9 +237,9 @@ class InstanceKlass: public Klass {
   JavaThread* volatile _init_thread;     // Pointer to current thread doing initialization (to handle recursive initialization)
 
   OopMapCache*    volatile _oop_map_cache;        // OopMapCache for all methods in the klass (allocated lazily)
-  JNIid* _jni_ids;                       // First JNI identifier for static fields in this class
-  jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or null if none
-  nmethodBucket* volatile _dep_context;  // packed DependencyContext structure
+  JNIid*          _jni_ids;                       // First JNI identifier for static fields in this class
+  jmethodID*      volatile _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or null if none
+  nmethodBucket*  volatile _dep_context;          // packed DependencyContext structure
   uint64_t volatile _dep_context_last_cleaned;
   nmethod* _osr_nmethods_head;           // Head of list of on-stack replacement nmethods for this class
 #if INCLUDE_JVMTI
@@ -794,14 +794,9 @@ public:
 
   // jmethodID support
   jmethodID get_jmethod_id(const methodHandle& method_h);
-  jmethodID get_jmethod_id_fetch_or_update(size_t idnum,
-                     jmethodID new_id, jmethodID* new_jmeths,
-                     jmethodID* to_dealloc_id_p,
-                     jmethodID** to_dealloc_jmeths_p);
-  static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum,
-                     size_t *length_p, jmethodID* id_p);
   void ensure_space_for_methodids(int start_offset = 0);
   jmethodID jmethod_id_or_null(Method* method);
+  void update_methods_jmethod_cache();
 
   // annotations support
   Annotations* annotations() const { return _annotations; }
@@ -1073,17 +1068,12 @@ public:
     Atomic::store(&_init_thread, thread);
   }
 
-  // The RedefineClasses() API can cause new method idnums to be needed
-  // which will cause the caches to grow. Safety requires different
-  // cache management logic if the caches can grow instead of just
-  // going from null to non-null.
-  bool idnum_can_increment() const { return has_been_redefined(); }
   inline jmethodID* methods_jmethod_ids_acquire() const;
   inline void release_set_methods_jmethod_ids(jmethodID* jmeths);
   // This nulls out jmethodIDs for all methods in 'klass'
   static void clear_jmethod_ids(InstanceKlass* klass);
-
+  jmethodID update_jmethod_id(jmethodID* jmeths, Method* method, int idnum);
   // Lock during initialization
 public:
   // Returns the array class for the n'th dimension
   virtual ArrayKlass* array_klass(int n, TRAPS);
@@ -2143,14 +2143,6 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
     return false;  // not found
   }
 
-  // Doesn't really destroy it, just marks it as free so it can be reused.
-  void destroy_method(Method** m) {
-#ifdef ASSERT
-    assert(contains(m), "should be a methodID");
-#endif // ASSERT
-    *m = _free_method;
-  }
-
   // During class unloading the methods are cleared, which is different
   // than freed.
   void clear_all_methods() {
@@ -2203,6 +2195,9 @@ jmethodID Method::make_jmethod_id(ClassLoaderData* cld, Method* m) {
   // Also have to add the method to the list safely, which the lock
   // protects as well.
   assert(JmethodIdCreation_lock->owned_by_self(), "sanity check");
+
+  ResourceMark rm;
+  log_debug(jmethod)("Creating jmethodID for Method %s", m->external_name());
   if (cld->jmethod_ids() == nullptr) {
     cld->set_jmethod_ids(new JNIMethodBlock());
   }
@@ -2215,14 +2210,6 @@ jmethodID Method::jmethod_id() {
   return method_holder()->get_jmethod_id(mh);
 }
 
-// Mark a jmethodID as free. This is called when there is a data race in
-// InstanceKlass while creating the jmethodID cache.
-void Method::destroy_jmethod_id(ClassLoaderData* cld, jmethodID m) {
-  Method** ptr = (Method**)m;
-  assert(cld->jmethod_ids() != nullptr, "should have method handles");
-  cld->jmethod_ids()->destroy_method(ptr);
-}
-
 void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* new_method) {
   // Can't assert the method_holder is the same because the new method has the
   // scratch method holder.
@@ -700,7 +700,6 @@ public:
   // made obsolete or deleted -- in these cases, the jmethodID
   // refers to null (as is the case for any weak reference).
   static jmethodID make_jmethod_id(ClassLoaderData* cld, Method* mh);
-  static void destroy_jmethod_id(ClassLoaderData* cld, jmethodID mid);
 
   // Ensure there is enough capacity in the internal tracking data
   // structures to hold the number of jmethodIDs you plan to generate.
@@ -4350,7 +4350,8 @@ void VM_RedefineClasses::redefine_single_class(Thread* current, jclass the_jclas
   the_class->vtable().initialize_vtable();
   the_class->itable().initialize_itable();
 
-  // Leave arrays of jmethodIDs and itable index cache unchanged
+  // Update jmethodID cache if present.
+  the_class->update_methods_jmethod_cache();
 
   // Copy the "source debug extension" attribute from new class version
   the_class->set_source_debug_extension(
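The RedefineClasses path now grows the jmethodID cache eagerly, inside the redefinition safepoint, via InstanceKlass::update_methods_jmethod_cache() (added earlier in this patch). Because every Java thread is stopped at that point, the grow-copy-swap-free sequence needs no locking. A stand-alone sketch of that idea, assuming single-threaded access and the same "length in element[0], entries offset by one" layout (names are illustrative; the real code uses NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY and asserts it runs at a safepoint):

    #include <cstddef>
    #include <cstdint>

    typedef void* Entry;

    // Grow a cache whose length is stored in element[0]; safe only while
    // no other thread can touch it (the analogue of running at a safepoint).
    Entry* grow_cache(Entry* old_cache, size_t new_size) {
      size_t old_size = (size_t)(uintptr_t)old_cache[0];
      if (old_size >= new_size) {
        return old_cache;                             // already big enough
      }
      Entry* new_cache = new Entry[new_size + 1]();   // zero-initialized, +1 for the length slot
      new_cache[0] = (Entry)(uintptr_t)new_size;
      for (size_t i = 1; i <= old_size; i++) {        // entries are offset by one
        new_cache[i] = old_cache[i];
      }
      delete[] old_cache;
      return new_cache;
    }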
@@ -253,7 +253,6 @@
   nonstatic_field(InstanceKlass, _jni_ids, JNIid*) \
   nonstatic_field(InstanceKlass, _osr_nmethods_head, nmethod*) \
   JVMTI_ONLY(nonstatic_field(InstanceKlass, _breakpoints, BreakpointInfo*)) \
-  volatile_nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \
   volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
   nonstatic_field(InstanceKlass, _annotations, Annotations*) \
   nonstatic_field(InstanceKlass, _method_ordering, Array<int>*) \