Merge
commit 562cf441d4
classLoaderData.cpp
@@ -67,6 +67,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
@@ -76,6 +77,11 @@
 #include "trace/tracing.hpp"
 #endif

+// helper function to avoid in-line casts
+template <typename T> static T* load_ptr_acquire(T* volatile *p) {
+  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
+}
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;

 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
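Note on the helper above: load_ptr_acquire() only wraps OrderAccess::load_ptr_acquire() so that call sites get back a typed pointer instead of casting at every use. The important property is the pairing: an acquire load on the reader side matches a release store on the writer side, so a reader that observes the published pointer also observes every write the publisher made before the store. A minimal standalone sketch of that pairing, using C++11 std::atomic rather than HotSpot's OrderAccess (Node, g_node, and the function names are illustrative, not from this patch):

    #include <atomic>

    struct Node { int payload; };

    std::atomic<Node*> g_node{nullptr};

    // Writer: fully construct the object, then publish with a release store
    // (the analogue of OrderAccess::release_store_ptr).
    void publish() {
      Node* n = new Node{42};                      // the write to n->payload...
      g_node.store(n, std::memory_order_release);  // ...happens-before this store
    }

    // Reader: the acquire load (the analogue of load_ptr_acquire) pairs with
    // the release store, so seeing the pointer implies seeing the payload.
    int read_or_default() {
      Node* n = g_node.load(std::memory_order_acquire);
      return (n != nullptr) ? n->payload : -1;
    }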
@@ -147,20 +153,23 @@ void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
 }

 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }

 void ClassLoaderData::classes_do(void f(Klass * const)) {
+  assert_locked_or_safepoint(_metaspace_lock);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     f(k);
   }
 }

 void ClassLoaderData::methods_do(void f(Method*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -179,7 +188,8 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
 }

 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -188,6 +198,7 @@ void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
 }

 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);
@@ -200,9 +211,11 @@ void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
 }

 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  if (_packages != NULL) {
-    for (int i = 0; i < _packages->table_size(); i++) {
-      for (PackageEntry* entry = _packages->bucket(i);
+  // Lock-free access requires load_ptr_acquire
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages != NULL) {
+    for (int i = 0; i < packages->table_size(); i++) {
+      for (PackageEntry* entry = packages->bucket(i);
                                  entry != NULL;
                                  entry = entry->next()) {
         f(entry);
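Note on packages_do(): the rewrite reads the field once, with acquire, into a local and then iterates only that snapshot. Re-reading _packages in the loop header would be a separate racy access on every iteration; a single acquire load both orders the bucket reads after the table's construction and pins the whole walk to one consistent table. A sketch of the read-once pattern, with std::atomic standing in for OrderAccess and illustrative names (Table, g_table, sum_buckets):

    #include <atomic>
    #include <cstddef>

    struct Table {
      std::size_t size;
      int*        buckets;
    };

    std::atomic<Table*> g_table{nullptr};

    // Load the pointer once and use only the local copy afterwards; if the
    // field were re-read inside the loop, a concurrent publication could be
    // observed halfway through the walk.
    long sum_buckets() {
      Table* t = g_table.load(std::memory_order_acquire);
      if (t == nullptr) return 0;
      long sum = 0;
      for (std::size_t i = 0; i < t->size; i++) {
        sum += t->buckets[i];
      }
      return sum;
    }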
@@ -325,10 +338,9 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
-    // Make sure linked class is stable, since the class list is walked without a lock
-    OrderAccess::storestore();
-    // link the new item into the list
-    _klasses = k;
+    // Link the new item into the list, making sure the linked class is stable
+    // since the list can be walked without a lock
+    OrderAccess::release_store_ptr(&_klasses, k);
   }

   if (publicize && k->class_loader_data() != NULL) {
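Note on add_class(): writers are still serialized by the metaspace lock; what changes is how the new list head is published to lock-free readers. The node's next pointer is written first, then the head is stored with release semantics, so any walker that sees the new head also sees a fully linked node. The old storestore()-plus-plain-store sequence had the same intent; release_store_ptr() bundles the ordering and the store into one primitive. A sketch of the pattern under those assumptions (std::atomic instead of OrderAccess; Node, g_head, and the functions are illustrative):

    #include <atomic>
    #include <mutex>

    struct Node {
      int   value;
      Node* next;
    };

    std::atomic<Node*> g_head{nullptr};
    std::mutex         g_writer_lock;  // plays the role of the metaspace lock

    // Push: link the node first, then publish the new head with release.
    void push(int v) {
      std::lock_guard<std::mutex> guard(g_writer_lock);   // writers serialized
      Node* n = new Node;
      n->value = v;
      n->next  = g_head.load(std::memory_order_relaxed);  // only writers store
      g_head.store(n, std::memory_order_release);         // publish linked node
    }

    // Lock-free walk: the acquire load pairs with the writer's release store.
    // This is only safe because nodes are never unlinked concurrently, which
    // matches the patch: remove_class() now runs only at a safepoint.
    bool contains(int v) {
      for (Node* n = g_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (n->value == v) return true;
      }
      return false;
    }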
@@ -343,11 +355,10 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
   }
 }

-// This is called by InstanceKlass::deallocate_contents() to remove the
-// scratch_class for redefine classes. We need a lock because there it may not
-// be called at a safepoint if there's an error.
+// Remove a klass from the _klasses list for scratch_class during redefinition
+// or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k == scratch_class) {
@@ -390,42 +401,46 @@ void ClassLoaderData::unload() {

 PackageEntryTable* ClassLoaderData::packages() {
   // Lazily create the package entry table at first request.
-  if (_packages == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages == NULL) {
     MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     // Check if _packages got allocated while we were waiting for this lock.
-    if (_packages == NULL) {
-      _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+    if ((packages = _packages) == NULL) {
+      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+      // Ensure _packages is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_packages, packages);
     }
   }
-  return _packages;
+  return packages;
 }

 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  if (_modules == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  if (modules == NULL) {
     MutexLocker m1(Module_lock);
-    // Check again if _modules has been allocated while we were getting this lock.
-    if (_modules != NULL) {
-      return _modules;
-    }
-
-    ModuleEntryTable* temp_table = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
-    // Each loader has one unnamed module entry. Create it before
-    // any classes, loaded by this loader, are defined in case
-    // they end up being defined in loader's unnamed module.
-    temp_table->create_unnamed_module(this);
-
-    {
-      MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-      // Ensure _modules is stable, since it is examined without a lock
-      OrderAccess::storestore();
-      _modules = temp_table;
+    // Check if _modules got allocated while we were waiting for this lock.
+    if ((modules = _modules) == NULL) {
+      modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
+      // Each loader has one unnamed module entry. Create it before
+      // any classes, loaded by this loader, are defined in case
+      // they end up being defined in loader's unnamed module.
+      modules->create_unnamed_module(this);
+
+      {
+        MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+        // Ensure _modules is stable, since it is examined without a lock
+        OrderAccess::release_store_ptr(&_modules, modules);
+      }
     }
   }
-  return _modules;
+  return modules;
 }

 oop ClassLoaderData::keep_alive_object() const {
+  assert_locked_or_safepoint(_metaspace_lock);
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
 }
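Note on packages() and modules(): both now have the classic double-checked locking shape, done correctly: an acquire load on the fast path, a re-check under the lock so only one thread allocates, and a release store as the single publication point. In modules() the unnamed module is created before the table is published, so no lock-free reader can ever observe a table that lacks it. A compact sketch of the idiom (std::atomic and std::mutex stand in for OrderAccess and MutexLockerEx; Registry and the globals are illustrative):

    #include <atomic>
    #include <mutex>

    struct Registry { int data; };  // stands in for PackageEntryTable etc.

    std::atomic<Registry*> g_registry{nullptr};
    std::mutex             g_registry_lock;

    Registry* registry() {
      // Fast path: one acquire load, no lock taken.
      Registry* r = g_registry.load(std::memory_order_acquire);
      if (r == nullptr) {
        std::lock_guard<std::mutex> guard(g_registry_lock);
        // Re-check: another thread may have won the race to the lock.
        r = g_registry.load(std::memory_order_relaxed);  // ordered by the mutex
        if (r == nullptr) {
          r = new Registry{0};  // finish all initialization first...
          g_registry.store(r, std::memory_order_release);  // ...then publish
        }
      }
      return r;  // return the local; never re-read the field after the check
    }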
@@ -499,30 +514,33 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  if (_metaspace == NULL) {
-    MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-    // Check again if metaspace has been allocated while we were getting this lock.
-    if (_metaspace != NULL) {
-      return _metaspace;
-    }
-    if (this == the_null_class_loader_data()) {
-      assert (class_loader() == NULL, "Must be");
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
-    } else if (is_anonymous()) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
-    } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
-    } else {
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType));
-    }
-  }
-  return _metaspace;
+  // Lock-free access requires load_ptr_acquire.
+  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  if (metaspace == NULL) {
+    MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
+    // Check if _metaspace got allocated while we were waiting for this lock.
+    if ((metaspace = _metaspace) == NULL) {
+      if (this == the_null_class_loader_data()) {
+        assert (class_loader() == NULL, "Must be");
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+      } else if (is_anonymous()) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+      } else {
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+      }
+      // Ensure _metaspace is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_metaspace, metaspace);
+    }
+  }
+  return metaspace;
 }

 JNIHandleBlock* ClassLoaderData::handles() const { return _handles; }
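Note on metaspace_non_null(): the same idiom, with two details worth calling out. The re-read under the lock, (metaspace = _metaspace), can be a plain load because acquiring the mutex already orders it after the previous holder's store. And every branch now assigns the local while a single release_store_ptr() at the end publishes the result, one publication point instead of the four separate set_metaspace() calls it replaces; returning the local also avoids a second, unordered read of the field. A sketch of that select-then-publish-once shape (Kind, Space, and space_for are illustrative names):

    #include <atomic>
    #include <mutex>

    enum class Kind { Boot, Anonymous, Reflection, Standard };
    struct Space { Kind kind; };

    std::atomic<Space*> g_space{nullptr};
    std::mutex          g_space_lock;

    // All branches assign the local; the one release store at the end is the
    // single point where the object becomes visible to lock-free readers.
    Space* space_for(Kind kind) {
      Space* s = g_space.load(std::memory_order_acquire);
      if (s == nullptr) {
        std::lock_guard<std::mutex> guard(g_space_lock);
        s = g_space.load(std::memory_order_relaxed);  // the mutex orders this re-read
        if (s == nullptr) {
          s = new Space{kind};                        // pick a flavor, publish below
          g_space.store(s, std::memory_order_release);
        }
      }
      return s;
    }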
@@ -638,6 +656,7 @@ void ClassLoaderData::dump(outputStream * const out) {
 #endif // PRODUCT

 void ClassLoaderData::verify() {
+  assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();

   guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
@@ -656,7 +675,8 @@ void ClassLoaderData::verify() {
 }

 bool ClassLoaderData::contains_klass(Klass* klass) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -1046,6 +1066,7 @@ ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic

   // Find the first klass in the CLDG.
   while (cld != NULL) {
+    assert_locked_or_safepoint(cld->metaspace_lock());
     klass = cld->_klasses;
     if (klass != NULL) {
       _next_klass = klass;
@@ -1063,6 +1084,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass)

   // No more klasses in the current CLD. Time to find a new CLD.
   ClassLoaderData* cld = klass->class_loader_data();
+  assert_locked_or_safepoint(cld->metaspace_lock());
   while (next == NULL) {
     cld = cld->next();
     if (cld == NULL) {
classLoaderData.hpp
@@ -171,8 +171,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   Dependencies _dependencies; // holds dependencies from this class loader
                               // data to others.

-  Metaspace * _metaspace;  // Meta-space where meta-data defined by the
-                           // classes in the class loader are allocated.
+  Metaspace * volatile _metaspace;  // Meta-space where meta-data defined by the
+                                    // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
   bool _is_anonymous;      // if this CLD is for an anonymous class
@@ -186,9 +186,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   JNIHandleBlock* _handles; // Handles to constant pool arrays, Modules, etc, which
                             // have the same life cycle of the corresponding ClassLoader.

-  Klass* _klasses;                 // The classes defined by the class loader.
-  PackageEntryTable* _packages;    // The packages defined by the class loader.
-  ModuleEntryTable* _modules;      // The modules defined by the class loader.
+  Klass* volatile _klasses;               // The classes defined by the class loader.
+  PackageEntryTable* volatile _packages;  // The packages defined by the class loader.
+  ModuleEntryTable* volatile _modules;    // The modules defined by the class loader.

   // These method IDs are created for the class loader and set to NULL when the
   // class loader is unloaded. They are rarely freed, only for redefine classes
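Note on the header changes: the fields that are now read outside the lock gain a volatile qualifier. In HotSpot this is largely a convention: the OrderAccess primitives take volatile-qualified slots, and the qualifier keeps the C++ compiler from caching or folding the accesses, but it does not by itself emit the hardware barriers that weakly ordered processors such as PPC require; that is exactly what the explicit acquire/release calls are for. In post-C++11 code the same declarations would plausibly use std::atomic, roughly like this sketch (LoaderDataShape and the stand-in types are illustrative, not HotSpot code):

    #include <atomic>
    #include <mutex>

    class Metaspace;           // stand-ins for the HotSpot types
    class PackageEntryTable;
    class ModuleEntryTable;

    class LoaderDataShape {
      // Fields read without the lock become atomic pointers; members that
      // are always accessed under the lock can stay plain.
      std::atomic<Metaspace*>         _metaspace{nullptr};
      std::atomic<PackageEntryTable*> _packages{nullptr};
      std::atomic<ModuleEntryTable*>  _modules{nullptr};
      std::mutex                      _metaspace_lock;  // guards slow-path setup
    };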
@@ -216,8 +216,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();

-  void set_metaspace(Metaspace* m) { _metaspace = m; }
-
   JNIHandleBlock* handles() const;
   void set_handles(JNIHandleBlock* handles);

instanceKlass.cpp
@@ -1104,21 +1104,21 @@ void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_k, TRAPS) {

 void InstanceKlass::mask_for(const methodHandle& method, int bci,
                              InterpreterOopMap* entry_for) {
-  // Dirty read, then double-check under a lock.
-  if (_oop_map_cache == NULL) {
-    // Otherwise, allocate a new one.
+  // Lazily create the _oop_map_cache at first request
+  // Lock-free access requires load_ptr_acquire.
+  OopMapCache* oop_map_cache =
+    static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
-    // First time use. Allocate a cache in C heap
-    if (_oop_map_cache == NULL) {
-      // Release stores from OopMapCache constructor before assignment
-      // to _oop_map_cache. C++ compilers on ppc do not emit the
-      // required memory barrier only because of the volatile
-      // qualifier of _oop_map_cache.
-      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
+    // Check if _oop_map_cache was allocated while we were waiting for this lock
+    if ((oop_map_cache = _oop_map_cache) == NULL) {
+      oop_map_cache = new OopMapCache();
+      // Ensure _oop_map_cache is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
     }
   }
-  // _oop_map_cache is constant after init; lookup below does is own locking.
-  _oop_map_cache->lookup(method, bci, entry_for);
+  // _oop_map_cache is constant after init; lookup below does its own locking.
+  oop_map_cache->lookup(method, bci, entry_for);
 }

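Note on mask_for(): InstanceKlass gets the same treatment as the ClassLoaderData tables, and the deleted PPC comment is telling: the old code already knew the volatile qualifier alone would not emit the required barrier, and now the read side is fixed to match the release store. With several call sites sharing one shape, the idiom could be factored into a helper along these lines (lazy_init is hypothetical, not part of the patch; std::atomic and std::mutex stand in for OrderAccess and MutexLocker):

    #include <atomic>
    #include <mutex>

    // Run the factory at most once and publish its result; readers on the
    // fast path never block. This mirrors the shape of packages(), modules(),
    // metaspace_non_null(), and mask_for() after the patch.
    template <typename T, typename Factory>
    T* lazy_init(std::atomic<T*>& slot, std::mutex& lock, Factory make) {
      T* p = slot.load(std::memory_order_acquire);   // fast path
      if (p == nullptr) {
        std::lock_guard<std::mutex> guard(lock);
        p = slot.load(std::memory_order_relaxed);    // re-check under the lock
        if (p == nullptr) {
          p = make();                                // construct fully...
          slot.store(p, std::memory_order_release);  // ...then publish once
        }
      }
      return p;
    }

The body of mask_for() would then reduce to one lazy_init(...) call followed by lookup() on the returned local, which also avoids the old code's habit of re-reading _oop_map_cache for the final lookup.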