8213346: Re-implement shared dictionary using CompactHashtable

Reviewed-by: jiangli
This commit is contained in:
Ioi Lam 2018-11-07 19:40:27 -08:00
parent 14b8e187e2
commit 147fc3ed13
31 changed files with 832 additions and 1049 deletions

@ -287,7 +287,7 @@ InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS
if (!is_id_specified()) {
error("If source location is specified, id must be also specified");
}
InstanceKlass* k = ClassLoaderExt::load_class(class_name, _source, THREAD);
InstanceKlass* k = ClassLoaderExt::load_class(class_name, _source, CHECK_NULL);
if (strncmp(_class_name, "java/", 5) == 0) {
log_info(cds)("Prohibited package for non-bootstrap classes: %s.class from %s",
@ -303,8 +303,9 @@ InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS
_interfaces->length(), k->local_interfaces()->length());
}
if (!SystemDictionaryShared::add_non_builtin_klass(class_name, ClassLoaderData::the_null_class_loader_data(),
k, THREAD)) {
bool added = SystemDictionaryShared::add_unregistered_class(k, CHECK_NULL);
if (!added) {
// We allow only a single unregistered class for each unique name.
error("Duplicated class %s", _class_name);
}
@ -353,7 +354,7 @@ Klass* ClassListParser::load_current_class(TRAPS) {
vmSymbols::loadClass_name(),
vmSymbols::string_class_signature(),
ext_class_name,
THREAD);
THREAD); // <-- failure is handled below
} else {
// array classes are not supported in class list.
THROW_NULL(vmSymbols::java_lang_ClassNotFoundException());

@ -350,7 +350,3 @@ ClassPathEntry* ClassLoaderExt::find_classpath_entry_from_cache(const char* path
cached_path_entries->insert_before(0, ccpe);
return new_entry;
}
// Load the class described by the current line of the CDS class list.
// Simply forwards to ClassListParser; any pending exception raised during
// loading is left in THREAD for the caller to handle.
Klass* ClassLoaderExt::load_one_class(ClassListParser* parser, TRAPS) {
return parser->load_current_class(THREAD);
}

@ -115,7 +115,6 @@ public:
static void record_result(const s2 classpath_index,
InstanceKlass* result, TRAPS);
static InstanceKlass* load_class(Symbol* h_name, const char* path, TRAPS);
static Klass* load_one_class(ClassListParser* parser, TRAPS);
static void set_has_app_classes() {
_has_app_classes = true;
}

@ -163,6 +163,7 @@ void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table
msg.info("Average bucket size : %9.3f", summary.avg());
msg.info("Variance of bucket size : %9.3f", summary.variance());
msg.info("Std. dev. of bucket size: %9.3f", summary.sd());
msg.info("Maximum bucket size : %9d", (int)summary.maximum());
msg.info("Empty buckets : %9d", _num_empty_buckets);
msg.info("Value_Only buckets : %9d", _num_value_only_buckets);
msg.info("Other buckets : %9d", _num_other_buckets);

@ -244,8 +244,6 @@ class CompactHashtable : public SimpleCompactHashtable {
}
public:
CompactHashtable() : SimpleCompactHashtable() {}
// Lookup a value V from the compact table using key K
inline V lookup(K key, unsigned int hash, int len) const {
if (_entry_count > 0) {
@ -299,8 +297,53 @@ public:
}
}
}
// Print bucket-distribution statistics (entry count, max bucket size) for
// this compact hashtable to the given stream.
void print_table_statistics(outputStream* st, const char* name) {
st->print_cr("%s statistics:", name);
int total_entries = 0;
int max_bucket = 0;
for (u4 i = 0; i < _bucket_count; i++) {
u4 bucket_info = _buckets[i];
int bucket_type = BUCKET_TYPE(bucket_info);
int bucket_size;
if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
// A value-only bucket holds exactly one entry (value stored inline).
bucket_size = 1;
} else {
// Regular bucket: size is derived from the distance to the next
// bucket's offset. The "/ 2" implies each entry occupies two u4
// slots in the entry array -- TODO(review) confirm slot layout.
// Note: reads _buckets[i + 1], so a trailing sentinel bucket record
// is assumed to exist at index _bucket_count.
bucket_size = (BUCKET_OFFSET(_buckets[i + 1]) - BUCKET_OFFSET(bucket_info)) / 2;
}
total_entries += bucket_size;
if (max_bucket < bucket_size) {
max_bucket = bucket_size;
}
}
st->print_cr("Number of buckets       : %9d", _bucket_count);
st->print_cr("Number of entries       : %9d", total_entries);
st->print_cr("Maximum bucket size     : %9d", max_bucket);
}
};
////////////////////////////////////////////////////////////////////////
//
// OffsetCompactHashtable -- This is used to store many types of objects
// in the CDS archive. On 64-bit platforms, we save space by using a 32-bit
// offset from the CDS base address.
// Decode a value stored in a compact hashtable: the table records each
// value as a u4 offset relative to base_address (the CDS base address).
template <typename V>
V read_value_from_compact_hashtable(address base_address, u4 offset) {
  address value_location = base_address + offset;
  return (V)value_location;
}
// A CompactHashtable whose values are stored as 32-bit offsets from the CDS
// base address; read_value_from_compact_hashtable converts an offset back
// into a typed pointer on lookup. EQUALS compares a decoded value against a
// lookup key.
template <
typename K,
typename V,
bool (*EQUALS)(V value, K key, int len)
>
class OffsetCompactHashtable : public CompactHashtable<
K, V, read_value_from_compact_hashtable<V>, EQUALS> {
};
////////////////////////////////////////////////////////////////////////
//
// Read/Write the contents of a hashtable textual dump (created by

@ -27,7 +27,6 @@
#include "classfile/dictionary.inline.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.hpp"
@ -44,16 +43,8 @@
// needs resizing, which is costly to do at Safepoint.
bool Dictionary::_some_dictionary_needs_resizing = false;
// Size of one dictionary entry. When dumping the CDS archive the shared
// dictionary uses larger entries with extra per-class bookkeeping.
size_t Dictionary::entry_size() {
  return DumpSharedSpaces ? SystemDictionaryShared::dictionary_entry_size()
                          : sizeof(DictionaryEntry);
}
Dictionary::Dictionary(ClassLoaderData* loader_data, int table_size, bool resizable)
: Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size()),
: Hashtable<InstanceKlass*, mtClass>(table_size, (int)sizeof(DictionaryEntry)),
_resizable(resizable), _needs_resizing(false), _loader_data(loader_data) {
};
@ -61,7 +52,7 @@ Dictionary::Dictionary(ClassLoaderData* loader_data, int table_size, bool resiza
Dictionary::Dictionary(ClassLoaderData* loader_data,
int table_size, HashtableBucket<mtClass>* t,
int number_of_entries, bool resizable)
: Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size(), t, number_of_entries),
: Hashtable<InstanceKlass*, mtClass>(table_size, (int)sizeof(DictionaryEntry), t, number_of_entries),
_resizable(resizable), _needs_resizing(false), _loader_data(loader_data) {
};
@ -83,9 +74,6 @@ DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass)
DictionaryEntry* entry = (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::allocate_new_entry(hash, klass);
entry->set_pd_set(NULL);
assert(klass->is_instance_klass(), "Must be");
if (DumpSharedSpaces) {
SystemDictionaryShared::init_shared_dictionary_entry(klass, entry);
}
return entry;
}
@ -280,26 +268,6 @@ void Dictionary::do_unloading() {
}
}
// At CDS dump time, purge entries whose class ended up in an error state
// (e.g. failed verification) so they are not written into the archive.
void Dictionary::remove_classes_in_error_state() {
assert(DumpSharedSpaces, "supported only when dumping");
DictionaryEntry* probe = NULL;
for (int index = 0; index < table_size(); index++) {
// Walk the chain through a pointer-to-link so an entry can be unlinked
// in place without tracking a separate "previous" node.
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
InstanceKlass* ik = probe->instance_klass();
if (ik->is_in_error_state()) { // purge this entry
*p = probe->next();
free_entry(probe);
ResourceMark rm;
tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
// Do not advance p: *p is now the next entry in the chain.
continue;
}
p = probe->next_addr();
}
}
}
// Just the classes from defining class loaders
void Dictionary::classes_do(void f(InstanceKlass*)) {
for (int index = 0; index < table_size(); index++) {
@ -349,7 +317,6 @@ void Dictionary::classes_do(MetaspaceClosure* it) {
probe != NULL;
probe = probe->next()) {
it->push(probe->klass_addr());
((SharedDictionaryEntry*)probe)->metaspace_pointers_do(it);
}
}
}
@ -390,9 +357,7 @@ DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash,
entry != NULL;
entry = entry->next()) {
if (entry->hash() == hash && entry->equals(class_name)) {
if (!DumpSharedSpaces || SystemDictionaryShared::is_builtin(entry)) {
return entry;
}
return entry;
}
}
return NULL;
@ -423,18 +388,6 @@ InstanceKlass* Dictionary::find_class(int index, unsigned int hash,
}
// Variant of find_class for shared classes. No locking required, as
// that table is static.
// Look up a class in this (shared) dictionary; returns NULL if absent.
InstanceKlass* Dictionary::find_shared_class(int index, unsigned int hash,
Symbol* name) {
assert (index == index_for(name), "incorrect index?");
DictionaryEntry* entry = get_entry(index, hash, name);
return (entry != NULL) ? entry->instance_klass() : NULL;
}
void Dictionary::add_protection_domain(int index, unsigned int hash,
InstanceKlass* klass,
Handle protection_domain,
@ -465,70 +418,6 @@ bool Dictionary::is_valid_protection_domain(unsigned int hash,
return entry->is_valid_protection_domain(protection_domain);
}
#if INCLUDE_CDS
// Returns true if k is jdk/internal/event/Event or any subclass of it
// (walks the superclass chain up to the root).
static bool is_jfr_event_class(Klass *k) {
  for (Klass* c = k; c != NULL; c = c->super()) {
    if (c->name()->equals("jdk/internal/event/Event")) {
      return true;
    }
  }
  return false;
}
// Rebuild the dictionary's hash chains for archiving: drop classes that
// cannot be shared (signed JARs, JFR event classes), then re-bucket the
// survivors using a loader-independent hash so the table can be looked up
// at runtime without the original ClassLoaderData.
void Dictionary::reorder_dictionary_for_sharing() {
// Copy all the dictionary entries into a single master list.
assert(DumpSharedSpaces, "Should only be used at dump time");
DictionaryEntry* master_list = NULL;
for (int i = 0; i < table_size(); ++i) {
DictionaryEntry* p = bucket(i);
while (p != NULL) {
DictionaryEntry* next = p->next();
InstanceKlass*ik = p->instance_klass();
if (ik->has_signer_and_not_archived()) {
// We cannot include signed classes in the archive because the certificates
// used during dump time may be different than those used during
// runtime (due to expiration, etc).
ResourceMark rm;
tty->print_cr("Preload Warning: Skipping %s from signed JAR",
ik->name()->as_C_string());
free_entry(p);
} else if (is_jfr_event_class(ik)) {
// We cannot include JFR event classes because they need runtime-specific
// instrumentation in order to work with -XX:FlightRecorderOptions=retransform=false.
// There are only a small number of these classes, so it's not worthwhile to
// support them and make CDS more complicated.
ResourceMark rm;
tty->print_cr("Skipping JFR event class %s", ik->name()->as_C_string());
free_entry(p);
} else {
// Keep: prepend to the master list (order within buckets is rebuilt below).
p->set_next(master_list);
master_list = p;
}
p = next;
}
set_entry(i, NULL);
}
// Add the dictionary entries back to the list in the correct buckets.
while (master_list != NULL) {
DictionaryEntry* p = master_list;
master_list = master_list->next();
p->set_next(NULL);
Symbol* class_name = p->instance_klass()->name();
// Since the null class loader data isn't copied to the CDS archive,
// compute the hash with NULL for loader data.
unsigned int hash = compute_hash(class_name);
int index = hash_to_index(hash);
p->set_hash(hash);
p->set_next(bucket(index));
set_entry(index, p);
}
}
#endif
SymbolPropertyTable::SymbolPropertyTable(int table_size)
: Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
{
@ -605,10 +494,7 @@ void Dictionary::print_on(outputStream* st) const {
(loader_data() == e->class_loader_data());
st->print("%4d: %s%s", index, is_defining_class ? " " : "^", e->external_name());
ClassLoaderData* cld = e->class_loader_data();
if (cld == NULL) {
// Shared class not restored yet in shared dictionary
st->print(", loader data <shared, not restored>");
} else if (!loader_data()->is_the_null_class_loader_data()) {
if (!loader_data()->is_the_null_class_loader_data()) {
// Class loader output for the dictionary for the null class loader data is
// redundant and obvious.
st->print(", ");
@ -634,7 +520,7 @@ void Dictionary::verify() {
ClassLoaderData* cld = loader_data();
// class loader must be present; a null class loader is the
// bootstrap loader
guarantee(cld != NULL || DumpSharedSpaces ||
guarantee(cld != NULL ||
cld->class_loader() == NULL ||
cld->class_loader()->is_instance(),
"checking type of class_loader");

@ -36,8 +36,7 @@ class DictionaryEntry;
class BoolObjectClosure;
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The data structure for the class loader data dictionaries (and the shared system
// dictionary).
// The data structure for the class loader data dictionaries.
class Dictionary : public Hashtable<InstanceKlass*, mtClass> {
friend class VMStructs;
@ -54,8 +53,6 @@ class Dictionary : public Hashtable<InstanceKlass*, mtClass> {
void clean_cached_protection_domains(DictionaryEntry* probe);
protected:
static size_t entry_size();
public:
Dictionary(ClassLoaderData* loader_data, int table_size, bool resizable = false);
Dictionary(ClassLoaderData* loader_data, int table_size, HashtableBucket<mtClass>* t, int number_of_entries, bool resizable = false);
@ -70,15 +67,12 @@ public:
InstanceKlass* find_class(int index, unsigned int hash, Symbol* name);
InstanceKlass* find_shared_class(int index, unsigned int hash, Symbol* name);
void classes_do(void f(InstanceKlass*));
void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
void all_entries_do(KlassClosure* closure);
void classes_do(MetaspaceClosure* it);
void unlink();
void remove_classes_in_error_state();
// Unload classes whose defining loaders are unloaded
void do_unloading();
@ -92,9 +86,6 @@ public:
InstanceKlass* klass,
Handle protection_domain, TRAPS);
// Sharing support
void reorder_dictionary_for_sharing() NOT_CDS_RETURN;
void print_on(outputStream* st) const;
void verify();
DictionaryEntry* bucket(int i) const {

@ -1090,8 +1090,7 @@ oop java_lang_Class::archive_mirror(Klass* k, TRAPS) {
if (k->is_instance_klass()) {
InstanceKlass *ik = InstanceKlass::cast(k);
assert(ik->signers() == NULL && !k->has_signer_and_not_archived(),
"class with signer cannot be supported");
assert(ik->signers() == NULL, "class with signer should have been excluded");
if (!(ik->is_shared_boot_class() || ik->is_shared_platform_class() ||
ik->is_shared_app_class())) {

@ -57,9 +57,6 @@
#define ON_STACK_BUFFER_LENGTH 128
// --------------------------------------------------------------------------
inline Symbol* read_symbol_from_compact_hashtable(address base_address, u4 offset) {
return (Symbol*)(base_address + offset);
}
inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) {
if (value->equals(key, len)) {
@ -70,9 +67,8 @@ inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key
}
}
static CompactHashtable<
static OffsetCompactHashtable<
const char*, Symbol*,
read_symbol_from_compact_hashtable,
symbol_equals_compact_hashtable_entry
> _shared_table;
@ -637,16 +633,7 @@ struct CopyToArchive : StackObj {
unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
"must not rehash during dumping");
uintx deltax = MetaspaceShared::object_delta(sym);
// When the symbols are stored into the archive, we already check that
// they won't be more than MAX_SHARED_DELTA from the base address, or
// else the dumping would have been aborted.
assert(deltax <= MAX_SHARED_DELTA, "must not be");
u4 delta = u4(deltax);
// add to the compact table
_writer->add(fixed_hash, delta);
_writer->add(fixed_hash, MetaspaceShared::object_delta_u4(sym));
return true;
}
};

@ -96,7 +96,6 @@
#endif
PlaceholderTable* SystemDictionary::_placeholders = NULL;
Dictionary* SystemDictionary::_shared_dictionary = NULL;
LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
@ -355,7 +354,7 @@ InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
assert(!FieldType::is_array(super_name), "invalid super class name");
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// Special processing for CDS dump time.
// Special processing for handling UNREGISTERED shared classes.
InstanceKlass* k = SystemDictionaryShared::dump_time_resolve_super_or_fail(child_name,
super_name, class_loader, protection_domain, is_superclass, CHECK_NULL);
if (k) {
@ -1163,39 +1162,11 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
}
#if INCLUDE_CDS
// At runtime (never at dump time), install the shared dictionary read from
// the CDS archive. t points at the mapped bucket array; length must match
// the fixed dump-time table size. The resulting dictionary is non-resizable
// because its storage lives in the (read-only) archive mapping.
void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries) {
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
assert(length == _shared_dictionary_size * sizeof(HashtableBucket<mtClass>),
"bad shared dictionary size.");
_shared_dictionary = new Dictionary(ClassLoaderData::the_null_class_loader_data(),
_shared_dictionary_size, t, number_of_entries,
false /* explicitly set _resizable to false */);
}
// If there is a shared dictionary, then find the entry for the
// given shared system class, if any.
// Look up class_name in the shared (CDS) dictionary. Returns NULL when no
// shared archive is mapped or the class is not archived.
InstanceKlass* SystemDictionary::find_shared_class(Symbol* class_name) {
  Dictionary* dict = shared_dictionary();
  if (dict == NULL) {
    return NULL;
  }
  unsigned int d_hash = dict->compute_hash(class_name);
  int d_index = dict->hash_to_index(d_hash);
  return dict->find_shared_class(d_index, d_hash, class_name);
}
// Load a class for boot loader from the shared spaces (found through
// the shared system dictionary). Force the super class and all interfaces
// to be loaded.
// Load a class for boot loader from the shared spaces. This also
// forces the super class and all interfaces to be loaded.
InstanceKlass* SystemDictionary::load_shared_boot_class(Symbol* class_name,
TRAPS) {
InstanceKlass* ik = find_shared_class(class_name);
// Make sure we only return the boot class.
InstanceKlass* ik = SystemDictionaryShared::find_builtin_class(class_name);
if (ik != NULL && ik->is_shared_boot_class()) {
return load_shared_class(ik, Handle(), Handle(), THREAD);
}
@ -1410,18 +1381,6 @@ InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik,
}
return ik;
}
void SystemDictionary::clear_invoke_method_table() {
SymbolPropertyEntry* spe = NULL;
for (int index = 0; index < _invoke_method_table->table_size(); index++) {
SymbolPropertyEntry* p = _invoke_method_table->bucket(index);
while (p != NULL) {
spe = p;
p = p->next();
_invoke_method_table->free_entry(spe);
}
}
}
#endif // INCLUDE_CDS
InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
@ -1900,11 +1859,6 @@ void SystemDictionary::oops_do(OopClosure* f) {
invoke_method_table()->oops_do(f);
}
// CDS: scan and relocate all classes in the system dictionary.
void SystemDictionary::classes_do(MetaspaceClosure* it) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(it);
}
// CDS: scan and relocate all classes referenced by _well_known_klasses[].
void SystemDictionary::well_known_klasses_do(MetaspaceClosure* it) {
for (int id = FIRST_WKID; id < WKID_LIMIT; id++) {
@ -1920,22 +1874,6 @@ void SystemDictionary::methods_do(void f(Method*)) {
invoke_method_table()->methods_do(f);
}
// Applies remove_classes_in_error_state() to the dictionaries of the system
// and platform class loaders only; all other loader data is left untouched.
class RemoveClassesClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    if (!cld->is_system_class_loader_data() && !cld->is_platform_class_loader_data()) {
      return;
    }
    cld->dictionary()->remove_classes_in_error_state();
  }
};
// At CDS dump time, purge error-state classes from the boot dictionary and
// (via RemoveClassesClosure) from the system/platform loader dictionaries.
void SystemDictionary::remove_classes_in_error_state() {
ClassLoaderData::the_null_class_loader_data()->dictionary()->remove_classes_in_error_state();
RemoveClassesClosure rcc;
// Iterating the CLD graph requires the lock; the boot dictionary above
// was handled without it.
MutexLocker ml(ClassLoaderDataGraph_lock);
ClassLoaderDataGraph::cld_do(&rcc);
}
// ----------------------------------------------------------------------------
// Initialization
@ -2038,6 +1976,7 @@ void SystemDictionary::resolve_well_known_classes(TRAPS) {
HeapShared::fixup_mapped_heap_regions();
// Initialize the constant pool for the Object_class
assert(Object_klass()->is_shared(), "must be");
Object_klass()->constants()->restore_unshareable_info(CHECK);
resolve_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
} else
@ -2921,40 +2860,10 @@ ProtectionDomainCacheEntry* SystemDictionary::cache_get(Handle protection_domain
return _pd_cache_table->get(protection_domain);
}
#if INCLUDE_CDS
void SystemDictionary::reorder_dictionary_for_sharing() {
ClassLoaderData::the_null_class_loader_data()->dictionary()->reorder_dictionary_for_sharing();
}
#endif
// CDS dump support: size in bytes of the boot dictionary's bucket array.
size_t SystemDictionary::count_bytes_for_buckets() {
return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_buckets();
}
// CDS dump support: size in bytes of the boot dictionary's entry table.
size_t SystemDictionary::count_bytes_for_table() {
return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_table();
}
// CDS dump support: copy the boot dictionary's buckets into [top, end).
void SystemDictionary::copy_buckets(char* top, char* end) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_buckets(top, end);
}
// CDS dump support: copy the boot dictionary's entry table into [top, end).
void SystemDictionary::copy_table(char* top, char* end) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_table(top, end);
}
// ----------------------------------------------------------------------------
// Print the shared (CDS) dictionary to st.
// NOTE(review): no NULL check here -- assumes a shared archive is mapped
// (shared_dictionary() != NULL); callers must guarantee that.
void SystemDictionary::print_shared(outputStream *st) {
shared_dictionary()->print_on(st);
}
void SystemDictionary::print_on(outputStream *st) {
if (shared_dictionary() != NULL) {
st->print_cr("Shared Dictionary");
shared_dictionary()->print_on(st);
st->cr();
}
CDS_ONLY(SystemDictionaryShared::print_on(st));
GCMutexLocker mu(SystemDictionary_lock);
ClassLoaderDataGraph::print_dictionary(st);
@ -2996,9 +2905,7 @@ void SystemDictionary::dump(outputStream *st, bool verbose) {
if (verbose) {
print_on(st);
} else {
if (shared_dictionary() != NULL) {
shared_dictionary()->print_table_statistics(st, "Shared Dictionary");
}
CDS_ONLY(SystemDictionaryShared::print_table_statistics(st));
ClassLoaderDataGraph::print_dictionary_statistics(st);
placeholders()->print_table_statistics(st, "Placeholder Table");
constraints()->print_table_statistics(st, "LoaderConstraints Table");
@ -3031,60 +2938,6 @@ int SystemDictionaryDCmd::num_arguments() {
}
}
// CDS dump helper: moves every defining-loader entry from the system and
// platform loader dictionaries into the single master (boot) dictionary,
// re-hashing each entry for its new table. Initiating-loader entries are
// discarded rather than moved.
class CombineDictionariesClosure : public CLDClosure {
private:
Dictionary* _master_dictionary;
public:
CombineDictionariesClosure(Dictionary* master_dictionary) :
_master_dictionary(master_dictionary) {}
void do_cld(ClassLoaderData* cld) {
ResourceMark rm;
if (cld->is_unsafe_anonymous()) {
return;
}
if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
for (int i = 0; i < cld->dictionary()->table_size(); ++i) {
Dictionary* curr_dictionary = cld->dictionary();
DictionaryEntry* p = curr_dictionary->bucket(i);
while (p != NULL) {
Symbol* name = p->instance_klass()->name();
// Hash/index are computed against the master dictionary, which may
// have a different table size than the source dictionary.
unsigned int d_hash = _master_dictionary->compute_hash(name);
int d_index = _master_dictionary->hash_to_index(d_hash);
// Save the successor now: p is freed or re-linked below.
DictionaryEntry* next = p->next();
if (p->literal()->class_loader_data() != cld) {
// This is an initiating class loader entry; don't use it
log_trace(cds)("Skipping initiating cl entry: %s", name->as_C_string());
curr_dictionary->free_entry(p);
} else {
log_trace(cds)("Moved to boot dictionary: %s", name->as_C_string());
curr_dictionary->unlink_entry(p);
p->set_pd_set(NULL); // pd_set is runtime only information and will be reconstructed.
_master_dictionary->add_entry(d_index, p);
}
p = next;
}
// Every entry in this bucket has been moved or freed; clear the link.
*curr_dictionary->bucket_addr(i) = NULL;
}
}
}
};
// Combining platform and system loader dictionaries into boot loader dictionary.
// During run time, we only have one shared dictionary.
void SystemDictionary::combine_shared_dictionaries() {
assert(DumpSharedSpaces, "dump time only");
Dictionary* master_dictionary = ClassLoaderData::the_null_class_loader_data()->dictionary();
CombineDictionariesClosure cdc(master_dictionary);
ClassLoaderDataGraph::cld_do(&cdc);
// These tables are no longer valid or necessary. Keeping them around will
// cause SystemDictionary::verify() to fail. Let's empty them.
// NOTE(review): the previous tables are abandoned without being freed;
// presumably acceptable for the one-shot dump process -- confirm.
_placeholders = new PlaceholderTable(_placeholder_table_size);
_loader_constraints = new LoaderConstraintTable(_loader_constraint_size);
NOT_PRODUCT(SystemDictionary::verify());
}
void SystemDictionary::initialize_oop_storage() {
_vm_weak_oop_storage =
new OopStorage("VM Weak Oop Handles",

@ -349,11 +349,6 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(GCTimer* gc_timer);
// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();
static int calculate_systemdictionary_size(int loadedclasses);
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
@ -364,19 +359,9 @@ public:
static ProtectionDomainCacheTable* pd_cache_table() { return _pd_cache_table; }
public:
// Sharing support.
static void reorder_dictionary_for_sharing() NOT_CDS_RETURN;
static void combine_shared_dictionaries();
static size_t count_bytes_for_buckets();
static size_t count_bytes_for_table();
static void copy_buckets(char* top, char* end);
static void copy_table(char* top, char* end);
static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries);
// Printing
static void print() { return print_on(tty); }
static void print_on(outputStream* st);
static void print_shared(outputStream* st);
static void dump(outputStream* st, bool verbose);
// Monotonically increasing counter which grows as classes are
@ -579,7 +564,6 @@ public:
_loader_constraint_size = 107, // number of entries in constraint table
_resolution_error_size = 107, // number of entries in resolution error table
_invoke_method_size = 139, // number of entries in invoke method table
_shared_dictionary_size = 1009, // number of entries in shared dictionary
_placeholder_table_size = 1009 // number of entries in hash table for placeholders
};
@ -589,9 +573,6 @@ public:
// Hashtable holding placeholders for classes being loaded.
static PlaceholderTable* _placeholders;
// Hashtable holding classes from the shared archive.
static Dictionary* _shared_dictionary;
// Monotonically increasing counter which grows with
// loading classes as well as hot-swapping and breakpoint setting
// and removal.
@ -622,7 +603,6 @@ protected:
friend class VM_PopulateDumpSharedSpace;
friend class TraversePlaceholdersClosure;
static Dictionary* shared_dictionary() { return _shared_dictionary; }
static PlaceholderTable* placeholders() { return _placeholders; }
static LoaderConstraintTable* constraints() { return _loader_constraints; }
static ResolutionErrorTable* resolution_errors() { return _resolution_errors; }
@ -662,7 +642,6 @@ protected:
public:
static bool is_system_class_loader(oop class_loader);
static bool is_platform_class_loader(oop class_loader);
static void clear_invoke_method_table();
// Returns TRUE if the method is a non-public member of class java.lang.Object.
static bool is_nonpublic_Object_method(Method* m) {
@ -674,8 +653,6 @@ public:
static OopStorage* vm_weak_oop_storage();
protected:
static InstanceKlass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy
static void add_to_hierarchy(InstanceKlass* k, TRAPS);

@ -53,12 +53,277 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/stringUtils.hpp"
objArrayOop SystemDictionaryShared::_shared_protection_domains = NULL;
objArrayOop SystemDictionaryShared::_shared_jar_urls = NULL;
objArrayOop SystemDictionaryShared::_shared_jar_manifests = NULL;
DEBUG_ONLY(bool SystemDictionaryShared::_checked_excluded_classes = false;)
// Dump-time metadata collected for each class that may be written into the
// CDS archive: class-file CRC info, verifier constraints, and whether the
// class has been excluded from archiving.
class DumpTimeSharedClassInfo: public CHeapObj<mtClass> {
public:
// A verification constraint recorded as a pair of Symbol pointers
// (compare RunTimeSharedClassInfo::RTConstraint, which stores offsets).
struct DTConstraint {
Symbol* _name;
Symbol* _from_name;
DTConstraint() : _name(NULL), _from_name(NULL) {}
DTConstraint(Symbol* n, Symbol* fn) : _name(n), _from_name(fn) {}
};
InstanceKlass* _klass;
int _id;                                          // class-list id; -1 if not specified
int _clsfile_size;                                // -1 until set (UNREGISTERED classes)
int _clsfile_crc32;                               // -1 until set (UNREGISTERED classes)
bool _excluded;
GrowableArray<DTConstraint>* _verifier_constraints;
GrowableArray<char>* _verifier_constraint_flags;  // parallel to _verifier_constraints
DumpTimeSharedClassInfo() {
_klass = NULL;
_id = -1;
_clsfile_size = -1;
_clsfile_crc32 = -1;
_excluded = false;
_verifier_constraints = NULL;
_verifier_constraint_flags = NULL;
}
void add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object);
bool is_builtin() {
return SystemDictionaryShared::is_builtin(_klass);
}
// Number of recorded constraints; the flags array is the source of truth.
int num_constraints() {
if (_verifier_constraint_flags != NULL) {
return _verifier_constraint_flags->length();
} else {
return 0;
}
}
// Visit all metaspace pointers held by this record (for archive relocation).
void metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_klass);
if (_verifier_constraints != NULL) {
for (int i = 0; i < _verifier_constraints->length(); i++) {
DTConstraint* cons = _verifier_constraints->adr_at(i);
it->push(&cons->_name);
it->push(&cons->_from_name);
}
}
}
};
// Dump-time table mapping each InstanceKlass* being archived to its
// DumpTimeSharedClassInfo (CRC, verifier constraints, exclusion flag, ...).
// Also caches how many classes fall into the BUILTIN vs UNREGISTERED
// categories; the cache is (re)computed by update_counts().
class DumpTimeSharedClassTable: public ResourceHashtable<
  InstanceKlass*,
  DumpTimeSharedClassInfo,
  primitive_hash<InstanceKlass*>,
  primitive_equals<InstanceKlass*>,
  15889, // prime number
  ResourceObj::C_HEAP>
{
  int _builtin_count;
  int _unregistered_count;
public:
  // Zero-initialize the counters: C_HEAP allocation does not clear memory,
  // so without this count_of() would return garbage before update_counts().
  DumpTimeSharedClassTable() : _builtin_count(0), _unregistered_count(0) {}

  // Returns the info record for k, allocating a fresh one if absent.
  // New records may not be created once exclusion checking has started.
  DumpTimeSharedClassInfo* find_or_allocate_info_for(InstanceKlass* k) {
    DumpTimeSharedClassInfo* p = get(k);
    if (p == NULL) {
      assert(!SystemDictionaryShared::checked_excluded_classes(),
             "no new classes can be added after check_excluded_classes");
      put(k, DumpTimeSharedClassInfo());
      p = get(k);
      assert(p != NULL, "sanity");
      p->_klass = k;
    }
    return p;
  }

  // Iteration helper that tallies classes into the two categories.
  class CountClassByCategory : StackObj {
    DumpTimeSharedClassTable* _table;
  public:
    CountClassByCategory(DumpTimeSharedClassTable* table) : _table(table) {}
    bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
      if (SystemDictionaryShared::is_builtin(k)) {
        ++ _table->_builtin_count;
      } else {
        ++ _table->_unregistered_count;
      }
      return true; // keep on iterating
    }
  };

  // Recompute the category counts from scratch. The counters are reset
  // first so that repeated calls do not accumulate stale totals.
  void update_counts() {
    _builtin_count = 0;
    _unregistered_count = 0;
    CountClassByCategory counter(this);
    iterate(&counter);
  }

  // Cached count for one category, as of the last update_counts() call.
  int count_of(bool is_builtin) const {
    if (is_builtin) {
      return _builtin_count;
    } else {
      return _unregistered_count;
    }
  }
};
class RunTimeSharedClassInfo {
public:
struct CrcInfo {
int _clsfile_size;
int _clsfile_crc32;
};
// This is different than DumpTimeSharedClassInfo::DTConstraint. We use
// u4 instead of Symbol* to save space on 64-bit CPU.
struct RTConstraint {
u4 _name;
u4 _from_name;
};
InstanceKlass* _klass;
int _num_constraints;
// optional CrcInfo _crc; (only for UNREGISTERED classes)
// optional RTConstraint _verifier_constraints[_num_constraints]
// optional char _verifier_constraint_flags[_num_constraints]
private:
static size_t header_size_size() {
return sizeof(RunTimeSharedClassInfo);
}
static size_t crc_size(InstanceKlass* klass) {
if (!SystemDictionaryShared::is_builtin(klass)) {
return sizeof(CrcInfo);
} else {
return 0;
}
}
static size_t verifier_constraints_size(int num_constraints) {
return sizeof(RTConstraint) * num_constraints;
}
static size_t verifier_constraint_flags_size(int num_constraints) {
return sizeof(char) * num_constraints;
}
public:
static size_t byte_size(InstanceKlass* klass, int num_constraints) {
return header_size_size() +
crc_size(klass) +
verifier_constraints_size(num_constraints) +
verifier_constraint_flags_size(num_constraints);
}
private:
size_t crc_offset() const {
return header_size_size();
}
size_t verifier_constraints_offset() const {
return crc_offset() + crc_size(_klass);
}
size_t verifier_constraint_flags_offset() const {
return verifier_constraints_offset() + verifier_constraints_size(_num_constraints);
}
void check_constraint_offset(int i) const {
assert(0 <= i && i < _num_constraints, "sanity");
}
public:
CrcInfo* crc() const {
assert(crc_size(_klass) > 0, "must be");
return (CrcInfo*)(address(this) + crc_offset());
}
RTConstraint* verifier_constraints() {
assert(_num_constraints > 0, "sanity");
return (RTConstraint*)(address(this) + verifier_constraints_offset());
}
RTConstraint* verifier_constraint_at(int i) {
check_constraint_offset(i);
return verifier_constraints() + i;
}
char* verifier_constraint_flags() {
assert(_num_constraints > 0, "sanity");
return (char*)(address(this) + verifier_constraint_flags_offset());
}
void init(DumpTimeSharedClassInfo& info) {
_klass = info._klass;
_num_constraints = info.num_constraints();
if (!SystemDictionaryShared::is_builtin(_klass)) {
CrcInfo* c = crc();
c->_clsfile_size = info._clsfile_size;
c->_clsfile_crc32 = info._clsfile_crc32;
}
if (_num_constraints > 0) {
RTConstraint* constraints = verifier_constraints();
char* flags = verifier_constraint_flags();
int i;
for (i = 0; i < _num_constraints; i++) {
constraints[i]._name = MetaspaceShared::object_delta_u4(info._verifier_constraints->at(i)._name);
constraints[i]._from_name = MetaspaceShared::object_delta_u4(info._verifier_constraints->at(i)._from_name);
}
for (i = 0; i < _num_constraints; i++) {
flags[i] = info._verifier_constraint_flags->at(i);
}
}
}
bool matches(int clsfile_size, int clsfile_crc32) const {
return crc()->_clsfile_size == clsfile_size &&
crc()->_clsfile_crc32 == clsfile_crc32;
}
Symbol* get_constraint_name(int i) {
return (Symbol*)(SharedBaseAddress + verifier_constraint_at(i)->_name);
}
Symbol* get_constraint_from_name(int i) {
return (Symbol*)(SharedBaseAddress + verifier_constraint_at(i)->_from_name);
}
  // Returns the flag byte of the i-th constraint (bit values such as
  // FROM_FIELD_IS_PROTECTED -- see SystemDictionaryShared).
  char get_constraint_flag(int i) {
    check_constraint_offset(i);
    return verifier_constraint_flags()[i];
  }
private:
// ArchiveCompactor::allocate() has reserved a pointer immediately before
// archived InstanceKlasses. We can use this slot to do a quick
// lookup of InstanceKlass* -> RunTimeSharedClassInfo* without
// building a new hashtable.
//
// info_pointer_addr(klass) --> 0x0100 RunTimeSharedClassInfo*
// InstanceKlass* klass --> 0x0108 <C++ vtbl>
// 0x0110 fields from Klass ...
  static RunTimeSharedClassInfo** info_pointer_addr(InstanceKlass* klass) {
    // The reserved slot lives one pointer-width below the klass itself.
    return &((RunTimeSharedClassInfo**)klass)[-1];
  }
public:
  // Quick lookup: archived InstanceKlass* -> its RunTimeSharedClassInfo*.
  static RunTimeSharedClassInfo* get_for(InstanceKlass* klass) {
    return *info_pointer_addr(klass);
  }
  // Record the InstanceKlass* -> RunTimeSharedClassInfo* association
  // (done at dump time, read back via get_for() at run time).
  static void set_for(InstanceKlass* klass, RunTimeSharedClassInfo* record) {
    *info_pointer_addr(klass) = record;
  }
  // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS
  static inline bool EQUALS(
       const RunTimeSharedClassInfo* value, Symbol* key, int len_unused) {
    // Symbols are interned, so pointer identity is name equality.
    return (value->_klass->name() == key);
  }
};
// Read-only compact hashtable mapping a class name (Symbol*) to the
// RunTimeSharedClassInfo of the archived class of that name.
class RunTimeSharedDictionary : public OffsetCompactHashtable<
  Symbol*,
  const RunTimeSharedClassInfo*,
  RunTimeSharedClassInfo::EQUALS> {};
static DumpTimeSharedClassTable* _dumptime_table = NULL; // dump-time per-class info; lazily created
static RunTimeSharedDictionary _builtin_dictionary;      // archived BUILTIN classes (boot/platform/app)
static RunTimeSharedDictionary _unregistered_dictionary; // archived UNREGISTERED (custom-loader) classes
oop SystemDictionaryShared::shared_protection_domain(int index) {
return _shared_protection_domains->obj_at(index);
@ -478,9 +743,8 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class(
return NULL;
}
if (shared_dictionary() != NULL &&
(SystemDictionary::is_system_class_loader(class_loader()) ||
SystemDictionary::is_platform_class_loader(class_loader()))) {
if (SystemDictionary::is_system_class_loader(class_loader()) ||
SystemDictionary::is_platform_class_loader(class_loader())) {
// Fix for 4474172; see evaluation for more details
class_loader = Handle(
THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
@ -523,8 +787,7 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class(
InstanceKlass* SystemDictionaryShared::load_shared_class_for_builtin_loader(
Symbol* class_name, Handle class_loader, TRAPS) {
assert(UseSharedSpaces, "must be");
assert(shared_dictionary() != NULL, "already checked");
InstanceKlass* ik = shared_dictionary()->find_class_for_builtin_loader(class_name);
InstanceKlass* ik = find_builtin_class(class_name);
if (ik != NULL) {
if ((ik->is_shared_app_class() &&
@ -536,7 +799,6 @@ InstanceKlass* SystemDictionaryShared::load_shared_class_for_builtin_loader(
return load_shared_class(ik, class_loader, protection_domain, THREAD);
}
}
return NULL;
}
@ -574,12 +836,12 @@ void SystemDictionaryShared::allocate_shared_data_arrays(int size, TRAPS) {
}
// This function is called for loading only UNREGISTERED classes
InstanceKlass* SystemDictionaryShared::lookup_from_stream(const Symbol* class_name,
InstanceKlass* SystemDictionaryShared::lookup_from_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
const ClassFileStream* cfs,
TRAPS) {
if (shared_dictionary() == NULL) {
if (!UseSharedSpaces) {
return NULL;
}
if (class_name == NULL) { // don't do this for anonymous classes
@ -592,27 +854,18 @@ InstanceKlass* SystemDictionaryShared::lookup_from_stream(const Symbol* class_na
return NULL;
}
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
InstanceKlass* k;
{ // UNREGISTERED loader
if (!shared_dictionary()->class_exists_for_unregistered_loader(class_name)) {
// No classes of this name for unregistered loaders.
return NULL;
}
int clsfile_size = cfs->length();
int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
k = shared_dictionary()->find_class_for_unregistered_loader(class_name,
clsfile_size, clsfile_crc32);
}
if (k == NULL) { // not archived
const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, class_name);
if (record == NULL) {
return NULL;
}
return acquire_class_for_current_thread(k, class_loader,
int clsfile_size = cfs->length();
int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
if (!record->matches(clsfile_size, clsfile_crc32)) {
return NULL;
}
return acquire_class_for_current_thread(record->_klass, class_loader,
protection_domain, THREAD);
}
@ -649,19 +902,27 @@ InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(
return shared_klass;
}
bool SystemDictionaryShared::add_non_builtin_klass(Symbol* name,
ClassLoaderData* loader_data,
InstanceKlass* k,
TRAPS) {
assert(DumpSharedSpaces, "only when dumping");
assert(boot_loader_dictionary() != NULL, "must be");
// Dump-time set of the names of unregistered classes already loaded,
// used by add_unregistered_class() to reject duplicates of the same name.
static ResourceHashtable<
  Symbol*, bool,
  primitive_hash<Symbol*>,
  primitive_equals<Symbol*>,
  6661,  // prime number
  ResourceObj::C_HEAP> _loaded_unregistered_classes;
if (boot_loader_dictionary()->add_non_builtin_klass(name, loader_data, k)) {
MutexLocker mu_r(Compile_lock, THREAD); // not really necessary, but add_to_hierarchy asserts this.
add_to_hierarchy(k, CHECK_0);
// Register an unregistered (custom-loader) class at dump time and link it
// into the class hierarchy. Returns false if a class of the same name was
// already added -- only one unregistered class per name may be archived.
bool SystemDictionaryShared::add_unregistered_class(InstanceKlass* k, TRAPS) {
  assert(DumpSharedSpaces, "only when dumping");
  Symbol* name = k->name();
  if (_loaded_unregistered_classes.get(name) != NULL) {
    // We don't allow duplicated unregistered classes of the same name.
    return false;
  } else {
    bool isnew = _loaded_unregistered_classes.put(name, true);
    assert(isnew, "sanity");
    MutexLocker mu_r(Compile_lock, THREAD); // add_to_hierarchy asserts this.
    SystemDictionary::add_to_hierarchy(k, CHECK_0);
    return true;
  }
}
return false;
}
// This function is called to resolve the super/interfaces of shared classes for
@ -698,81 +959,138 @@ InstanceKlass* SystemDictionaryShared::dump_time_resolve_super_or_fail(
}
}
struct SharedMiscInfo {
InstanceKlass* _klass;
int _clsfile_size;
int _clsfile_crc32;
};
static GrowableArray<SharedMiscInfo>* misc_info_array = NULL;
// Returns the dump-time info for k, creating the table (on first use)
// and the entry if they do not exist yet.
DumpTimeSharedClassInfo* SystemDictionaryShared::find_or_allocate_info_for(InstanceKlass* k) {
  if (_dumptime_table == NULL) {
    _dumptime_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeSharedClassTable();
  }
  return _dumptime_table->find_or_allocate_info_for(k);
}
void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs) {
assert(DumpSharedSpaces, "only when dumping");
int clsfile_size = cfs->length();
int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
if (misc_info_array == NULL) {
misc_info_array = new (ResourceObj::C_HEAP, mtClass) GrowableArray<SharedMiscInfo>(20, /*c heap*/ true);
}
SharedMiscInfo misc_info;
DEBUG_ONLY({
for (int i=0; i<misc_info_array->length(); i++) {
misc_info = misc_info_array->at(i);
assert(misc_info._klass != k, "cannot call set_shared_class_misc_info twice for the same class");
}
});
misc_info._klass = k;
misc_info._clsfile_size = clsfile_size;
misc_info._clsfile_crc32 = clsfile_crc32;
misc_info_array->append(misc_info);
assert(!is_builtin(k), "must be unregistered class");
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->_clsfile_size = cfs->length();
info->_clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
}
void SystemDictionaryShared::init_shared_dictionary_entry(InstanceKlass* k, DictionaryEntry* ent) {
SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
entry->_id = -1;
entry->_clsfile_size = -1;
entry->_clsfile_crc32 = -1;
entry->_verifier_constraints = NULL;
entry->_verifier_constraint_flags = NULL;
// Ensure a dump-time info entry exists for k.
void SystemDictionaryShared::init_dumptime_info(InstanceKlass* k) {
  (void)find_or_allocate_info_for(k);
}
if (misc_info_array != NULL) {
for (int i=0; i<misc_info_array->length(); i++) {
SharedMiscInfo misc_info = misc_info_array->at(i);
if (misc_info._klass == k) {
entry->_clsfile_size = misc_info._clsfile_size;
entry->_clsfile_crc32 = misc_info._clsfile_crc32;
misc_info_array->remove_at(i);
return;
}
// Drop the dump-time info entry for k.
void SystemDictionaryShared::remove_dumptime_info(InstanceKlass* k) {
  _dumptime_table->remove(k);
}
// Returns true if k is jdk/internal/event/Event or has it among its
// superclasses (walking the java_super() chain).
bool SystemDictionaryShared::is_jfr_event_class(InstanceKlass *k) {
  for (InstanceKlass* ik = k; ik != NULL; ik = ik->java_super()) {
    if (ik->name()->equals("jdk/internal/event/Event")) {
      return true;
    }
  }
  return false;
}
// Log a warning that k will not be archived, with the given reason.
void SystemDictionaryShared::warn_excluded(InstanceKlass* k, const char* reason) {
  ResourceMark rm; // for name()->as_C_string()
  log_warning(cds)("Skipping %s: %s", k->name()->as_C_string(), reason);
}
// Decide whether k must be excluded from the CDS archive. A class is
// excluded if it is unsafe anonymous, failed loading/linking, comes from
// an unsupported location, is signed, or is a JFR event class.
bool SystemDictionaryShared::should_be_excluded(InstanceKlass* k) {
  if (k->class_loader_data()->is_unsafe_anonymous()) {
    return true; // unsafe anonymous classes are not archived, skip
  }
  if (k->is_in_error_state()) {
    // Classes that failed verification/linking cannot be archived.
    return true;
  }
  if (k->shared_classpath_index() < 0 && is_builtin(k)) {
    // These are classes loaded from unsupported locations (such as those loaded by JVMTI native
    // agent during dump time).
    warn_excluded(k, "Unsupported location");
    return true;
  }
  if (k->signers() != NULL) {
    // We cannot include signed classes in the archive because the certificates
    // used during dump time may be different than those used during
    // runtime (due to expiration, etc).
    warn_excluded(k, "Signed JAR");
    return true;
  }
  if (is_jfr_event_class(k)) {
    // We cannot include JFR event classes because they need runtime-specific
    // instrumentation in order to work with -XX:FlightRecorderOptions=retransform=false.
    // There are only a small number of these classes, so it's not worthwhile to
    // support them and make CDS more complicated.
    warn_excluded(k, "JFR event class");
    return true;
  }
  return false;
}
// k is a class before relocating by ArchiveCompactor.
// Sanity-check that k is registered, not excluded, and that its loader
// type is consistent with its BUILTIN/UNREGISTERED category.
void SystemDictionaryShared::validate_before_archiving(InstanceKlass* k) {
  ResourceMark rm; // for name()->as_C_string()
  const char* name = k->name()->as_C_string();
  DumpTimeSharedClassInfo* info = _dumptime_table->get(k);
  guarantee(info != NULL, "Class %s must be entered into _dumptime_table", name);
  guarantee(!info->_excluded, "Should not attempt to archive excluded class %s", name);
  if (is_builtin(k)) {
    // BUILTIN classes are loaded by the boot/platform/app loaders, which
    // record a non-zero loader type.
    guarantee(k->loader_type() != 0,
              "Class loader type must be set for BUILTIN class %s", name);
  } else {
    guarantee(k->loader_type() == 0,
              "Class loader type must not be set for UNREGISTERED class %s", name);
  }
}
// Table iterator that marks every dump-time entry that must not be archived.
class ExcludeDumpTimeSharedClasses : StackObj {
public:
  bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
    if (SystemDictionaryShared::should_be_excluded(k)) {
      info._excluded = true;
    }
    return true; // keep on iterating
  }
};
// Walk the dump-time table once and mark all classes that must be excluded.
void SystemDictionaryShared::check_excluded_classes() {
  ExcludeDumpTimeSharedClasses excl;
  _dumptime_table->iterate(&excl);
  DEBUG_ONLY(_checked_excluded_classes = true;) // enables the assert in is_excluded_class()
}
// Returns whether k was marked excluded. Must be called only after
// check_excluded_classes(), and only at dump time.
bool SystemDictionaryShared::is_excluded_class(InstanceKlass* k) {
  assert(_checked_excluded_classes, "sanity");
  assert(DumpSharedSpaces, "only when dumping");
  return find_or_allocate_info_for(k)->_excluded;
}
// Table iterator that applies a MetaspaceClosure to the metaspace
// pointers of every entry that will be archived (i.e. not excluded).
class IterateDumpTimeSharedClassTable : StackObj {
  MetaspaceClosure *_it;
public:
  IterateDumpTimeSharedClassTable(MetaspaceClosure* it) : _it(it) {}
  bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
    if (!info._excluded) {
      info.metaspace_pointers_do(_it);
    }
    return true; // keep on iterating
  }
};
// Visit the metaspace pointers of all classes that will be archived.
void SystemDictionaryShared::dumptime_classes_do(class MetaspaceClosure* it) {
  IterateDumpTimeSharedClassTable iter(it);
  _dumptime_table->iterate(&iter);
}
bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
assert(DumpSharedSpaces, "called at dump time only");
// Skip unsafe anonymous classes, which are not archived as they are not in
// dictionary (see assert_no_unsafe_anonymous_classes_in_dictionaries() in
// VM_PopulateDumpSharedSpace::doit()).
if (k->class_loader_data()->is_unsafe_anonymous()) {
return true; // unsafe anonymous classes are not archived, skip
}
SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);
ResourceMark rm;
// Lambda classes are not archived and will be regenerated at runtime.
if (entry == NULL) {
guarantee(strstr(k->name()->as_C_string(), "Lambda$") != NULL,
"class should be in dictionary before being verified");
return true;
}
entry->add_verification_constraint(name, from_name, from_field_is_protected,
from_is_array, from_is_object);
if (entry->is_builtin()) {
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->add_verification_constraint(k, name, from_name, from_field_is_protected,
from_is_array, from_is_object);
if (is_builtin(k)) {
// For builtin class loaders, we can try to complete the verification check at dump time,
// because we can resolve all the constraint classes.
return false;
@ -783,135 +1101,54 @@ bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbo
}
}
void SystemDictionaryShared::finalize_verification_constraints_for(InstanceKlass* k) {
if (!k->is_unsafe_anonymous()) {
SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);
entry->finalize_verification_constraints();
}
}
void SystemDictionaryShared::finalize_verification_constraints() {
MutexLocker mcld(ClassLoaderDataGraph_lock);
ClassLoaderDataGraph::dictionary_classes_do(finalize_verification_constraints_for);
}
void SystemDictionaryShared::check_verification_constraints(InstanceKlass* klass,
TRAPS) {
assert(!DumpSharedSpaces && UseSharedSpaces, "called at run time with CDS enabled only");
SharedDictionaryEntry* entry = shared_dictionary()->find_entry_for(klass);
assert(entry != NULL, "call this only for shared classes");
entry->check_verification_constraints(klass, THREAD);
}
// Find the dictionary entry whose literal is exactly this klass, by
// walking the hash bucket for the klass's name. Returns NULL if absent.
SharedDictionaryEntry* SharedDictionary::find_entry_for(InstanceKlass* klass) {
  Symbol* class_name = klass->name();
  unsigned int hash = compute_hash(class_name);
  SharedDictionaryEntry* e = bucket(hash_to_index(hash));
  while (e != NULL) {
    if (e->hash() == hash && e->literal() == klass) {
      return e;
    }
    e = e->next();
  }
  return NULL;
}
void SharedDictionaryEntry::add_verification_constraint(Symbol* name,
void DumpTimeSharedClassInfo::add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
if (_verifier_constraints == NULL) {
_verifier_constraints = new(ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(8, true, mtClass);
_verifier_constraints = new(ResourceObj::C_HEAP, mtClass) GrowableArray<DTConstraint>(4, true, mtClass);
}
if (_verifier_constraint_flags == NULL) {
_verifier_constraint_flags = new(ResourceObj::C_HEAP, mtClass) GrowableArray<char>(4, true, mtClass);
}
GrowableArray<Symbol*>* vc_array = (GrowableArray<Symbol*>*)_verifier_constraints;
for (int i=0; i<vc_array->length(); i+= 2) {
if (name == vc_array->at(i) &&
from_name == vc_array->at(i+1)) {
GrowableArray<DTConstraint>* vc_array = _verifier_constraints;
for (int i = 0; i < vc_array->length(); i++) {
DTConstraint* p = vc_array->adr_at(i);
if (name == p->_name && from_name == p->_from_name) {
return;
}
}
vc_array->append(name);
vc_array->append(from_name);
DTConstraint cons(name, from_name);
vc_array->append(cons);
GrowableArray<char>* vcflags_array = (GrowableArray<char>*)_verifier_constraint_flags;
GrowableArray<char>* vcflags_array = _verifier_constraint_flags;
char c = 0;
c |= from_field_is_protected ? FROM_FIELD_IS_PROTECTED : 0;
c |= from_is_array ? FROM_IS_ARRAY : 0;
c |= from_is_object ? FROM_IS_OBJECT : 0;
c |= from_field_is_protected ? SystemDictionaryShared::FROM_FIELD_IS_PROTECTED : 0;
c |= from_is_array ? SystemDictionaryShared::FROM_IS_ARRAY : 0;
c |= from_is_object ? SystemDictionaryShared::FROM_IS_OBJECT : 0;
vcflags_array->append(c);
if (log_is_enabled(Trace, cds, verification)) {
ResourceMark rm;
log_trace(cds, verification)("add_verification_constraint: %s: %s must be subclass of %s",
instance_klass()->external_name(), from_name->as_klass_external_name(),
k->external_name(), from_name->as_klass_external_name(),
name->as_klass_external_name());
}
}
int SharedDictionaryEntry::finalize_verification_constraints() {
assert(DumpSharedSpaces, "called at dump time only");
Thread* THREAD = Thread::current();
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
GrowableArray<Symbol*>* vc_array = (GrowableArray<Symbol*>*)_verifier_constraints;
GrowableArray<char>* vcflags_array = (GrowableArray<char>*)_verifier_constraint_flags;
void SystemDictionaryShared::check_verification_constraints(InstanceKlass* klass,
TRAPS) {
assert(!DumpSharedSpaces && UseSharedSpaces, "called at run time with CDS enabled only");
RunTimeSharedClassInfo* record = RunTimeSharedClassInfo::get_for(klass);
if (vc_array != NULL) {
if (log_is_enabled(Trace, cds, verification)) {
ResourceMark rm;
log_trace(cds, verification)("finalize_verification_constraint: %s",
literal()->external_name());
}
int length = record->_num_constraints;
if (length > 0) {
for (int i = 0; i < length; i++) {
Symbol* name = record->get_constraint_name(i);
Symbol* from_name = record->get_constraint_from_name(i);
char c = record->get_constraint_flag(i);
// Copy the constraints from C_HEAP-alloced GrowableArrays to Metaspace-alloced
// Arrays
int size = 0;
{
// FIXME: change this to be done after relocation, so we can use symbol offset??
int length = vc_array->length();
Array<Symbol*>* out = MetadataFactory::new_array<Symbol*>(loader_data, length, 0, THREAD);
assert(out != NULL, "Dump time allocation failure would have aborted VM");
for (int i=0; i<length; i++) {
out->at_put(i, vc_array->at(i));
}
_verifier_constraints = out;
size += out->size() * BytesPerWord;
delete vc_array;
}
{
int length = vcflags_array->length();
Array<char>* out = MetadataFactory::new_array<char>(loader_data, length, 0, THREAD);
assert(out != NULL, "Dump time allocation failure would have aborted VM");
for (int i=0; i<length; i++) {
out->at_put(i, vcflags_array->at(i));
}
_verifier_constraint_flags = out;
size += out->size() * BytesPerWord;
delete vcflags_array;
}
return size;
}
return 0;
}
void SharedDictionaryEntry::check_verification_constraints(InstanceKlass* klass, TRAPS) {
Array<Symbol*>* vc_array = (Array<Symbol*>*)_verifier_constraints;
Array<char>* vcflags_array = (Array<char>*)_verifier_constraint_flags;
if (vc_array != NULL) {
int length = vc_array->length();
for (int i=0; i<length; i+=2) {
Symbol* name = vc_array->at(i);
Symbol* from_name = vc_array->at(i+1);
char c = vcflags_array->at(i/2);
bool from_field_is_protected = (c & FROM_FIELD_IS_PROTECTED) ? true : false;
bool from_is_array = (c & FROM_IS_ARRAY) ? true : false;
bool from_is_object = (c & FROM_IS_OBJECT) ? true : false;
bool from_field_is_protected = (c & SystemDictionaryShared::FROM_FIELD_IS_PROTECTED) ? true : false;
bool from_is_array = (c & SystemDictionaryShared::FROM_IS_ARRAY) ? true : false;
bool from_is_object = (c & SystemDictionaryShared::FROM_IS_OBJECT) ? true : false;
bool ok = VerificationType::resolve_and_check_assignability(klass, name,
from_name, from_field_is_protected, from_is_array, from_is_object, CHECK);
@ -930,132 +1167,100 @@ void SharedDictionaryEntry::check_verification_constraints(InstanceKlass* klass,
}
}
void SharedDictionaryEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push((Array<Symbol*>**)&_verifier_constraints);
it->push((Array<char>**)&_verifier_constraint_flags);
// Table iterator that copies each non-excluded class of the requested
// category (builtin or not) into read-only archive space as a
// RunTimeSharedClassInfo and registers it with the CompactHashtableWriter,
// keyed by the hash of the class name.
class CopySharedClassInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  bool _is_builtin;
public:
  CopySharedClassInfoToArchive(CompactHashtableWriter* writer, bool is_builtin)
    : _writer(writer), _is_builtin(is_builtin) {}
  bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
    if (!info._excluded && info.is_builtin() == _is_builtin) {
      // Allocate a variable-sized record and fill it from the dump-time info.
      size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_constraints());
      RunTimeSharedClassInfo* record =
        (RunTimeSharedClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
      record->init(info);
      unsigned int hash = primitive_hash<Symbol*>(info._klass->name());
      _writer->add(hash, MetaspaceShared::object_delta_u4(record));
      // Save this for quick runtime lookup of InstanceKlass* -> RunTimeSharedClassInfo*
      RunTimeSharedClassInfo::set_for(info._klass, record);
    }
    return true; // keep on iterating
  }
};
// Serialize one category (builtin or unregistered) of the dump-time
// table into the given compact runtime dictionary.
void SystemDictionaryShared::write_dictionary(RunTimeSharedDictionary* dictionary, bool is_builtin) {
  CompactHashtableStats stats;
  dictionary->reset();
  // Size the table from the number of archivable classes in this category.
  int num_buckets = CompactHashtableWriter::default_num_buckets(_dumptime_table->count_of(is_builtin));
  CompactHashtableWriter writer(num_buckets, &stats);
  CopySharedClassInfoToArchive copy(&writer, is_builtin);
  _dumptime_table->iterate(&copy);
  writer.dump(dictionary, is_builtin ? "builtin dictionary" : "unregistered dictionary");
}
bool SharedDictionary::add_non_builtin_klass(const Symbol* class_name,
ClassLoaderData* loader_data,
InstanceKlass* klass) {
// Write both shared dictionaries (builtin and unregistered) to the archive.
void SystemDictionaryShared::write_to_archive() {
  _dumptime_table->update_counts();
  write_dictionary(&_builtin_dictionary, true);
  write_dictionary(&_unregistered_dictionary, false);
}
// (De)serialize the headers of both runtime dictionaries.
void SystemDictionaryShared::serialize_dictionary_headers(SerializeClosure* soc) {
  _builtin_dictionary.serialize_header(soc);
  _unregistered_dictionary.serialize_header(soc);
}
// Look up the archived record for 'name' in the given dictionary.
// Returns NULL when CDS is not in use or no such class was archived.
const RunTimeSharedClassInfo*
SystemDictionaryShared::find_record(RunTimeSharedDictionary* dict, Symbol* name) {
  if (!UseSharedSpaces) {
    return NULL;
  }
  unsigned int hash = primitive_hash<Symbol*>(name);
  return dict->lookup(name, hash, 0);
}
// Returns the archived BUILTIN class with the given name, or NULL.
InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) {
  const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, name);
  return (record != NULL) ? record->_klass : NULL;
}
void SystemDictionaryShared::update_shared_entry(InstanceKlass* k, int id) {
assert(DumpSharedSpaces, "supported only when dumping");
assert(klass != NULL, "adding NULL klass");
assert(klass->name() == class_name, "sanity check on name");
assert(klass->shared_classpath_index() < 0,
"the shared classpath index should not be set for shared class loaded by the custom loaders");
DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
info->_id = id;
}
// Add an entry for a non-builtin class.
// For a shared class for custom class loaders, SystemDictionary::resolve_or_null will
// not find this class, because is_builtin() is false.
unsigned int hash = compute_hash(class_name);
int index = hash_to_index(hash);
class SharedDictionaryPrinter : StackObj {
outputStream* _st;
int _index;
public:
SharedDictionaryPrinter(outputStream* st) : _st(st), _index(0) {}
for (SharedDictionaryEntry* entry = bucket(index);
entry != NULL;
entry = entry->next()) {
if (entry->hash() == hash) {
InstanceKlass* klass = entry->instance_klass();
if (klass->name() == class_name && klass->class_loader_data() == loader_data) {
// There is already a class defined with the same name
return false;
}
}
void do_value(const RunTimeSharedClassInfo* record) {
ResourceMark rm;
_st->print_cr("%4d: %s", (_index++), record->_klass->external_name());
}
};
assert(Dictionary::entry_size() >= sizeof(SharedDictionaryEntry), "must be big enough");
SharedDictionaryEntry* entry = (SharedDictionaryEntry*)new_entry(hash, klass);
add_entry(index, entry);
assert(entry->is_unregistered(), "sanity");
assert(!entry->is_builtin(), "sanity");
return true;
}
//-----------------
// SharedDictionary
//-----------------
InstanceKlass* SharedDictionary::find_class_for_builtin_loader(const Symbol* name) const {
SharedDictionaryEntry* entry = get_entry_for_builtin_loader(name);
return entry != NULL ? entry->instance_klass() : (InstanceKlass*)NULL;
}
InstanceKlass* SharedDictionary::find_class_for_unregistered_loader(const Symbol* name,
int clsfile_size,
int clsfile_crc32) const {
const SharedDictionaryEntry* entry = get_entry_for_unregistered_loader(name,
clsfile_size,
clsfile_crc32);
return entry != NULL ? entry->instance_klass() : NULL;
}
void SharedDictionary::update_entry(InstanceKlass* klass, int id) {
assert(DumpSharedSpaces, "supported only when dumping");
Symbol* class_name = klass->name();
unsigned int hash = compute_hash(class_name);
int index = hash_to_index(hash);
for (SharedDictionaryEntry* entry = bucket(index);
entry != NULL;
entry = entry->next()) {
if (entry->hash() == hash && entry->literal() == klass) {
entry->_id = id;
return;
}
void SystemDictionaryShared::print_on(outputStream* st) {
if (UseSharedSpaces) {
st->print_cr("Shared Dictionary");
SharedDictionaryPrinter p(st);
_builtin_dictionary.iterate(&p);
_unregistered_dictionary.iterate(&p);
}
ShouldNotReachHere();
}
SharedDictionaryEntry* SharedDictionary::get_entry_for_builtin_loader(const Symbol* class_name) const {
assert(!DumpSharedSpaces, "supported only when at runtime");
unsigned int hash = compute_hash(class_name);
const int index = hash_to_index(hash);
for (SharedDictionaryEntry* entry = bucket(index);
entry != NULL;
entry = entry->next()) {
if (entry->hash() == hash && entry->equals(class_name)) {
if (entry->is_builtin()) {
return entry;
}
}
void SystemDictionaryShared::print_table_statistics(outputStream* st) {
if (UseSharedSpaces) {
_builtin_dictionary.print_table_statistics(st, "Builtin Shared Dictionary");
_unregistered_dictionary.print_table_statistics(st, "Unregistered Shared Dictionary");
}
return NULL;
}
// At run time, find the archived entry for an UNREGISTERED class with the
// given name. When clsfile_size == -1 (called from
// class_exists_for_unregistered_loader) only the name is matched;
// otherwise the entry must also match the (clsfile_size, clsfile_crc32)
// of the ClassFileStream being loaded. Returns NULL if no match.
SharedDictionaryEntry* SharedDictionary::get_entry_for_unregistered_loader(const Symbol* class_name,
                                                                           int clsfile_size,
                                                                           int clsfile_crc32) const {
  assert(!DumpSharedSpaces, "supported only when at runtime");
  unsigned int hash = compute_hash(class_name);
  int index = hash_to_index(hash);
  for (SharedDictionaryEntry* entry = bucket(index);
       entry != NULL;
       entry = entry->next()) {
    if (entry->hash() == hash && entry->equals(class_name)) {
      if (entry->is_unregistered()) {
        if (clsfile_size == -1) {
          // We're called from class_exists_for_unregistered_loader. At run time, we want to
          // compute the CRC of a ClassFileStream only if there is an UNREGISTERED class
          // with the matching name.
          return entry;
        } else {
          // We're called from find_class_for_unregistered_loader.
          // Fix: compare the recorded classfile size for equality instead of
          // merely testing it for non-zero, so that a classfile of a
          // different size with a colliding CRC is not matched.
          if (clsfile_size == entry->_clsfile_size && clsfile_crc32 == entry->_clsfile_crc32) {
            return entry;
          }
        }
        // There can be only 1 class with this name for unregistered loaders.
        return NULL;
      }
    }
  }
  return NULL;
}

@ -36,13 +36,12 @@
Handling of the classes in the AppCDS archive
To ensure safety and to simplify the implementation, archived classes are
"segregated" into several types. The following rules describe how they
"segregated" into 2 types. The following rules describe how they
are stored and looked up.
[1] Category of archived classes
There are 3 disjoint groups of classes stored in the AppCDS archive. They are
categorized as by their SharedDictionaryEntry::loader_type()
There are 2 disjoint groups of classes stored in the AppCDS archive:
BUILTIN: These classes may be defined ONLY by the BOOT/PLATFORM/APP
loaders.
@ -83,112 +82,39 @@
Bar id: 3 super: 0 interfaces: 1 source: /foo.jar
[3] Identifying the loader_type of archived classes in the shared dictionary
Each archived Klass* C is associated with a SharedDictionaryEntry* E
[3] Identifying the category of archived classes
BUILTIN: (C->shared_classpath_index() >= 0)
UNREGISTERED: (C->shared_classpath_index() < 0)
UNREGISTERED: (C->shared_classpath_index() == UNREGISTERED_INDEX (-9999))
[4] Lookup of archived classes at run time:
(a) BUILTIN loaders:
Search the shared directory for a BUILTIN class with a matching name.
search _builtin_dictionary
(b) UNREGISTERED loaders:
The search originates with SystemDictionaryShared::lookup_from_stream().
Search the shared directory for a UNREGISTERED class with a matching
(name, clsfile_len, clsfile_crc32) tuple.
search _unregistered_dictionary for an entry that matches the
(name, clsfile_len, clsfile_crc32).
===============================================================================*/
#define UNREGISTERED_INDEX -9999
class ClassFileStream;
class DumpTimeSharedClassInfo;
class DumpTimeSharedClassTable;
class RunTimeSharedClassInfo;
class RunTimeSharedDictionary;
// Archived classes need extra information not needed by traditionally loaded classes.
// To keep footprint small, we add these in the dictionary entry instead of the InstanceKlass.
class SharedDictionaryEntry : public DictionaryEntry {
class SystemDictionaryShared: public SystemDictionary {
public:
enum LoaderType {
LT_BUILTIN,
LT_UNREGISTERED
};
enum {
FROM_FIELD_IS_PROTECTED = 1 << 0,
FROM_IS_ARRAY = 1 << 1,
FROM_IS_OBJECT = 1 << 2
};
int _id;
int _clsfile_size;
int _clsfile_crc32;
void* _verifier_constraints; // FIXME - use a union here to avoid type casting??
void* _verifier_constraint_flags;
// See "Identifying the loader_type of archived classes" comments above.
LoaderType loader_type() const {
InstanceKlass* k = instance_klass();
if ((k->shared_classpath_index() != UNREGISTERED_INDEX)) {
return LT_BUILTIN;
} else {
return LT_UNREGISTERED;
}
}
SharedDictionaryEntry* next() {
return (SharedDictionaryEntry*)(DictionaryEntry::next());
}
bool is_builtin() const {
return loader_type() == LT_BUILTIN;
}
bool is_unregistered() const {
return loader_type() == LT_UNREGISTERED;
}
void add_verification_constraint(Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object);
int finalize_verification_constraints();
void check_verification_constraints(InstanceKlass* klass, TRAPS);
void metaspace_pointers_do(MetaspaceClosure* it) NOT_CDS_RETURN;
};
class SharedDictionary : public Dictionary {
SharedDictionaryEntry* get_entry_for_builtin_loader(const Symbol* name) const;
SharedDictionaryEntry* get_entry_for_unregistered_loader(const Symbol* name,
int clsfile_size,
int clsfile_crc32) const;
// Convenience functions
SharedDictionaryEntry* bucket(int index) const {
return (SharedDictionaryEntry*)(Dictionary::bucket(index));
}
public:
SharedDictionaryEntry* find_entry_for(InstanceKlass* klass);
bool add_non_builtin_klass(const Symbol* class_name,
ClassLoaderData* loader_data,
InstanceKlass* obj);
void update_entry(InstanceKlass* klass, int id);
InstanceKlass* find_class_for_builtin_loader(const Symbol* name) const;
InstanceKlass* find_class_for_unregistered_loader(const Symbol* name,
int clsfile_size,
int clsfile_crc32) const;
bool class_exists_for_unregistered_loader(const Symbol* name) {
return (get_entry_for_unregistered_loader(name, -1, -1) != NULL);
}
};
class SystemDictionaryShared: public SystemDictionary {
private:
// These _shared_xxxs arrays are used to initialize the java.lang.Package and
// java.security.ProtectionDomain objects associated with each shared class.
@ -282,8 +208,17 @@ private:
Handle class_loader,
Handle protection_domain,
TRAPS);
static void finalize_verification_constraints_for(InstanceKlass* k);
static DumpTimeSharedClassInfo* find_or_allocate_info_for(InstanceKlass* k);
static void write_dictionary(RunTimeSharedDictionary* dictionary, bool is_builtin);
static bool is_jfr_event_class(InstanceKlass *k);
static void warn_excluded(InstanceKlass* k, const char* reason);
DEBUG_ONLY(static bool _checked_excluded_classes;)
public:
static InstanceKlass* find_builtin_class(Symbol* class_name);
static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* dict, Symbol* name);
// Called by PLATFORM/APP loader only
static InstanceKlass* find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
@ -311,8 +246,7 @@ public:
return NULL;
}
static bool add_non_builtin_klass(Symbol* class_name, ClassLoaderData* loader_data,
InstanceKlass* k, TRAPS);
static bool add_unregistered_class(InstanceKlass* k, TRAPS);
static InstanceKlass* dump_time_resolve_super_or_fail(Symbol* child_name,
Symbol* class_name,
Handle class_loader,
@ -320,36 +254,17 @@ public:
bool is_superclass,
TRAPS);
static size_t dictionary_entry_size() {
return (DumpSharedSpaces) ? sizeof(SharedDictionaryEntry) : sizeof(DictionaryEntry);
}
static void init_shared_dictionary_entry(InstanceKlass* k, DictionaryEntry* entry) NOT_CDS_RETURN;
static bool is_builtin(DictionaryEntry* ent) {
// Can't use virtual function is_builtin because DictionaryEntry doesn't initialize
// vtable because it's not constructed properly.
SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
return entry->is_builtin();
}
// For convenient access to the SharedDictionaryEntry's of the archived classes.
static SharedDictionary* shared_dictionary() {
assert(!DumpSharedSpaces, "not for dumping");
return (SharedDictionary*)SystemDictionary::shared_dictionary();
}
static SharedDictionary* boot_loader_dictionary() {
return (SharedDictionary*)ClassLoaderData::the_null_class_loader_data()->dictionary();
}
static void update_shared_entry(InstanceKlass* klass, int id) {
assert(DumpSharedSpaces, "sanity");
assert((SharedDictionary*)(klass->class_loader_data()->dictionary()) != NULL, "sanity");
((SharedDictionary*)(klass->class_loader_data()->dictionary()))->update_entry(klass, id);
static void init_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
static void remove_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
static Dictionary* boot_loader_dictionary() {
return ClassLoaderData::the_null_class_loader_data()->dictionary();
}
static void update_shared_entry(InstanceKlass* klass, int id);
static void set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs);
static InstanceKlass* lookup_from_stream(const Symbol* class_name,
static InstanceKlass* lookup_from_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
const ClassFileStream* st,
@ -366,9 +281,23 @@ public:
static bool add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected,
bool from_is_array, bool from_is_object) NOT_CDS_RETURN_(false);
static void finalize_verification_constraints() NOT_CDS_RETURN;
static void check_verification_constraints(InstanceKlass* klass,
TRAPS) NOT_CDS_RETURN;
TRAPS) NOT_CDS_RETURN;
static bool is_builtin(InstanceKlass* k) {
return (k->shared_classpath_index() != UNREGISTERED_INDEX);
}
static bool should_be_excluded(InstanceKlass* k);
static void check_excluded_classes();
static void validate_before_archiving(InstanceKlass* k);
static bool is_excluded_class(InstanceKlass* k);
static void dumptime_classes_do(class MetaspaceClosure* it);
static void write_to_archive();
static void serialize_dictionary_headers(class SerializeClosure* soc);
static void print() { return print_on(tty); }
static void print_on(outputStream* st) NOT_CDS_RETURN;
static void print_table_statistics(outputStream* st) NOT_CDS_RETURN;
DEBUG_ONLY(static bool checked_excluded_classes() {return _checked_excluded_classes;})
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

@ -244,6 +244,7 @@ class ClassLoaderData;
class MetaspaceClosure;
class MetaspaceObj {
friend class VMStructs;
// When CDS is enabled, all shared metaspace objects are mapped
// into a single contiguous memory block, so we can use these
// two pointers to quickly determine if something is in the

@ -209,6 +209,7 @@ void FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
_verify_local = BytecodeVerificationLocal;
_verify_remote = BytecodeVerificationRemote;
_has_platform_or_app_classes = ClassLoaderExt::has_platform_or_app_classes();
_shared_base_address = SharedBaseAddress;
}
void SharedClassPathEntry::init(const char* name, bool is_modules_image, TRAPS) {
@ -533,6 +534,7 @@ bool FileMapInfo::init_from_file(int fd) {
}
_file_offset += (long)n;
SharedBaseAddress = _header->_shared_base_address;
return true;
}
@ -666,7 +668,8 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
// +-- gap
size_t FileMapInfo::write_archive_heap_regions(GrowableArray<MemRegion> *heap_mem,
GrowableArray<ArchiveHeapOopmapInfo> *oopmaps,
int first_region_id, int max_num_regions) {
int first_region_id, int max_num_regions,
bool print_log) {
assert(max_num_regions <= 2, "Only support maximum 2 memory regions");
int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
@ -687,8 +690,10 @@ size_t FileMapInfo::write_archive_heap_regions(GrowableArray<MemRegion> *heap_me
total_size += size;
}
log_info(cds)("Archive heap region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
i, p2i(start), p2i(start + size), size);
if (print_log) {
log_info(cds)("Archive heap region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
i, p2i(start), p2i(start + size), size);
}
write_region(i, start, size, false, false);
if (size > 0) {
space_at(i)->_oopmap = oopmaps->at(arr_idx)._oopmap;

@ -150,6 +150,7 @@ struct FileMapHeader : public CDSFileMapHeaderBase {
bool _verify_local; // BytecodeVerificationLocal setting
bool _verify_remote; // BytecodeVerificationRemote setting
bool _has_platform_or_app_classes; // Archive contains app classes
size_t _shared_base_address; // SharedBaseAddress used at dump time
void set_has_platform_or_app_classes(bool v) {
_has_platform_or_app_classes = v;
@ -263,7 +264,8 @@ public:
bool read_only, bool allow_exec);
size_t write_archive_heap_regions(GrowableArray<MemRegion> *heap_mem,
GrowableArray<ArchiveHeapOopmapInfo> *oopmaps,
int first_region_id, int max_num_regions);
int first_region_id, int max_num_regions,
bool print_log);
void write_bytes(const void* buffer, size_t count);
void write_bytes_aligned(const void* buffer, size_t count);
char* map_region(int i, char** top_ret);

@ -391,9 +391,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
record->init(&info);
unsigned int hash = primitive_hash<Klass*>(klass);
uintx deltax = MetaspaceShared::object_delta(record);
guarantee(deltax <= MAX_SHARED_DELTA, "must not be");
u4 delta = u4(deltax);
u4 delta = MetaspaceShared::object_delta_u4(record);
_writer->add(hash, delta);
}
return true; // keep on iterating
@ -417,7 +415,7 @@ void HeapShared::write_subgraph_info_table() {
int num_buckets = CompactHashtableWriter::default_num_buckets(d_table->_count);
CompactHashtableWriter writer(num_buckets, &stats);
CopyKlassSubGraphInfoToArchive copy(&writer);
_dump_time_subgraph_info_table->iterate(&copy);
d_table->iterate(&copy);
writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}
@ -433,7 +431,7 @@ void HeapShared::initialize_from_archived_subgraph(Klass* k) {
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
unsigned int hash = primitive_hash<Klass*>(k);
ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
// Initialize from archived data. Currently this is done only
// during VM initialization time. No lock is needed.

@ -109,9 +109,9 @@ class ArchivedKlassSubGraphInfoRecord {
ArchivedKlassSubGraphInfoRecord() :
_k(NULL), _entry_field_records(NULL), _subgraph_object_klasses(NULL) {}
void init(KlassSubGraphInfo* info);
Klass* klass() { return _k; }
Array<juint>* entry_field_records() { return _entry_field_records; }
Array<Klass*>* subgraph_object_klasses() { return _subgraph_object_klasses; }
Klass* klass() const { return _k; }
Array<juint>* entry_field_records() const { return _entry_field_records; }
Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; }
};
#endif // INCLUDE_CDS_JAVA_HEAP
@ -154,18 +154,16 @@ class HeapShared: AllStatic {
int _count;
};
inline static ArchivedKlassSubGraphInfoRecord* read_record_from_compact_hashtable(address base_address, u4 offset) {
return (ArchivedKlassSubGraphInfoRecord*)(base_address + offset);
}
inline static bool record_equals_compact_hashtable_entry(ArchivedKlassSubGraphInfoRecord* value, const Klass* key, int len_unused) {
public: // solaris compiler wants this for RunTimeKlassSubGraphInfoTable
inline static bool record_equals_compact_hashtable_entry(
const ArchivedKlassSubGraphInfoRecord* value, const Klass* key, int len_unused) {
return (value->klass() == key);
}
typedef CompactHashtable<
private:
typedef OffsetCompactHashtable<
const Klass*,
ArchivedKlassSubGraphInfoRecord*,
read_record_from_compact_hashtable,
const ArchivedKlassSubGraphInfoRecord*,
record_equals_compact_hashtable_entry
> RunTimeKlassSubGraphInfoTable;

@ -64,7 +64,6 @@
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif
@ -124,6 +123,15 @@ private:
MetaspaceShared::report_out_of_space(_name, newtop - _top);
ShouldNotReachHere();
}
uintx delta = MetaspaceShared::object_delta_uintx(newtop);
if (delta > MAX_SHARED_DELTA) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
vm_exit_during_initialization("Out of memory in the CDS archive",
"Please reduce the number of shared classes.");
}
MetaspaceShared::commit_shared_space_to(newtop);
_top = newtop;
return _top;
@ -323,6 +331,7 @@ void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
}
_mc_region.init(&_shared_rs);
SharedBaseAddress = (size_t)_shared_rs.base();
tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
_shared_rs.size(), p2i(_shared_rs.base()));
}
@ -416,6 +425,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
SymbolTable::serialize_shared_table_header(soc);
StringTable::serialize_shared_table_header(soc);
HeapShared::serialize_subgraph_info_table_header(soc);
SystemDictionaryShared::serialize_dictionary_headers(soc);
JavaClasses::serialize_offsets(soc);
InstanceMirrorKlass::serialize_offsets(soc);
@ -464,13 +474,11 @@ static void collect_array_classes(Klass* k) {
class CollectClassesClosure : public KlassClosure {
void do_klass(Klass* k) {
if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) {
// Mark any class with signers and don't add to the _global_klass_objects
k->set_has_signer_and_not_archived();
} else {
_global_klass_objects->append_if_missing(k);
}
if (k->is_instance_klass() &&
SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
// Don't add to the _global_klass_objects
} else {
_global_klass_objects->append_if_missing(k);
}
if (k->is_array_klass()) {
// Add in the array classes too
@ -577,16 +585,6 @@ static void relocate_cached_class_file() {
}
}
NOT_PRODUCT(
static void assert_not_unsafe_anonymous_class(InstanceKlass* k) {
assert(!(k->is_unsafe_anonymous()), "cannot archive unsafe anonymous classes");
}
// Unsafe anonymous classes are not stored inside any dictionaries.
static void assert_no_unsafe_anonymous_classes_in_dictionaries() {
ClassLoaderDataGraph::dictionary_classes_do(assert_not_unsafe_anonymous_class);
})
// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
@ -1123,6 +1121,17 @@ public:
newtop = _ro_region.top();
} else {
oldtop = _rw_region.top();
if (ref->msotype() == MetaspaceObj::ClassType) {
// Save a pointer immediate in front of an InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
// without building another hashtable. See RunTimeSharedClassInfo::get_for()
// in systemDictionaryShared.cpp.
Klass* klass = (Klass*)obj;
if (klass->is_instance_klass()) {
SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
_rw_region.allocate(sizeof(address), BytesPerWord);
}
}
p = _rw_region.allocate(bytes, alignment);
newtop = _rw_region.top();
}
@ -1132,16 +1141,6 @@ public:
assert(isnew, "must be");
_alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
if (ref->msotype() == MetaspaceObj::SymbolType) {
uintx delta = MetaspaceShared::object_delta(p);
if (delta > MAX_SHARED_DELTA) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of Symbols and would require
// millions of shared classes.
vm_exit_during_initialization("Too many Symbols in the CDS archive",
"Please reduce the number of shared classes.");
}
}
}
static address get_new_loc(MetaspaceClosure::Ref* ref) {
@ -1281,7 +1280,7 @@ public:
}
}
FileMapInfo::metaspace_pointers_do(it);
SystemDictionary::classes_do(it);
SystemDictionaryShared::dumptime_classes_do(it);
Universe::metaspace_pointers_do(it);
SymbolTable::metaspace_pointers_do(it);
vmSymbols::metaspace_pointers_do(it);
@ -1315,9 +1314,6 @@ void VM_PopulateDumpSharedSpace::dump_symbols() {
char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
ArchiveCompactor::OtherROAllocMark mark;
// Reorder the system dictionary. Moving the symbols affects
// how the hash table indices are calculated.
SystemDictionary::reorder_dictionary_for_sharing();
tty->print("Removing java_mirror ... ");
if (!HeapShared::is_heap_object_archiving_allowed()) {
@ -1325,15 +1321,10 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
}
remove_java_mirror_in_classes();
tty->print_cr("done. ");
NOT_PRODUCT(SystemDictionary::verify();)
size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
SystemDictionaryShared::write_to_archive();
size_t table_bytes = SystemDictionary::count_bytes_for_table();
char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
SystemDictionary::copy_table(table_top, _ro_region.top());
char* start = _ro_region.top();
// Write the other data to the output array.
WriteClosure wc(&_ro_region);
@ -1342,7 +1333,7 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
// Write the bitmaps for patching the archive heap regions
dump_archive_heap_oopmaps();
return buckets_top;
return start;
}
void VM_PopulateDumpSharedSpace::doit() {
@ -1367,14 +1358,11 @@ void VM_PopulateDumpSharedSpace::doit() {
"loader constraints are not saved");
guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
"placeholders are not saved");
// Revisit and implement this if we prelink method handle call sites:
guarantee(SystemDictionary::invoke_method_table() == NULL ||
SystemDictionary::invoke_method_table()->number_of_entries() == 0,
"invoke method table is not saved");
// At this point, many classes have been loaded.
// Gather systemDictionary classes in a global array and do everything to
// that so we don't have to walk the SystemDictionary again.
SystemDictionaryShared::check_excluded_classes();
_global_klass_objects = new GrowableArray<Klass*>(1000);
CollectClassesClosure collect_classes;
ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
@ -1403,21 +1391,11 @@ void VM_PopulateDumpSharedSpace::doit() {
rewrite_nofast_bytecodes_and_calculate_fingerprints();
tty->print_cr("done. ");
// Move classes from platform/system dictionaries into the boot dictionary
SystemDictionary::combine_shared_dictionaries();
// Make sure all classes have a correct loader type.
ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(MetaspaceShared::check_shared_class_loader_type);
// Remove all references outside the metadata
tty->print("Removing unshareable information ... ");
remove_unshareable_in_classes();
tty->print_cr("done. ");
// We don't support archiving unsafe anonymous classes. Verify that they are not stored in
// any dictionaries.
NOT_PRODUCT(assert_no_unsafe_anonymous_classes_in_dictionaries());
ArchiveCompactor::initialize();
ArchiveCompactor::copy_and_compact();
@ -1466,6 +1444,7 @@ void VM_PopulateDumpSharedSpace::doit() {
mapinfo->set_core_spaces_size(core_spaces_size);
for (int pass=1; pass<=2; pass++) {
bool print_archive_log = (pass==1);
if (pass == 1) {
// The first pass doesn't actually write the data to disk. All it
// does is to update the fields in the mapinfo->_header.
@ -1490,12 +1469,14 @@ void VM_PopulateDumpSharedSpace::doit() {
_closed_archive_heap_regions,
_closed_archive_heap_oopmaps,
MetaspaceShared::first_closed_archive_heap_region,
MetaspaceShared::max_closed_archive_heap_region);
MetaspaceShared::max_closed_archive_heap_region,
print_archive_log);
_total_open_archive_region_size = mapinfo->write_archive_heap_regions(
_open_archive_heap_regions,
_open_archive_heap_oopmaps,
MetaspaceShared::first_open_archive_heap_region,
MetaspaceShared::max_open_archive_heap_region);
MetaspaceShared::max_open_archive_heap_region,
print_archive_log);
}
mapinfo->close();
@ -1608,17 +1589,6 @@ class CheckSharedClassesClosure : public KlassClosure {
}
};
void MetaspaceShared::check_shared_class_loader_type(InstanceKlass* ik) {
ResourceMark rm;
if (ik->shared_classpath_index() == UNREGISTERED_INDEX) {
guarantee(ik->loader_type() == 0,
"Class loader type must not be set for this class %s", ik->name()->as_C_string());
} else {
guarantee(ik->loader_type() != 0,
"Class loader type must be set for this class %s", ik->name()->as_C_string());
}
}
void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
// We need to iterate because verification may cause additional classes
// to be loaded.
@ -1639,9 +1609,6 @@ void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
check_closure.reset();
ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
} while (check_closure.made_progress());
// Unverifiable classes will not be included in the CDS archive.
SystemDictionary::remove_classes_in_error_state();
}
}
@ -1717,10 +1684,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
link_and_cleanup_shared_classes(CATCH);
tty->print_cr("Rewriting and linking classes: done");
SystemDictionary::clear_invoke_method_table();
SystemDictionaryShared::finalize_verification_constraints();
VM_PopulateDumpSharedSpace op;
VMThread::execute(&op);
}
@ -1731,36 +1694,36 @@ int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
ClassListParser parser(class_list_path);
int class_count = 0;
while (parser.parse_one_line()) {
Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
if (HAS_PENDING_EXCEPTION) {
if (klass == NULL &&
(PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
// print a warning only when the pending exception is class not found
tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
}
CLEAR_PENDING_EXCEPTION;
}
if (klass != NULL) {
if (log_is_enabled(Trace, cds)) {
ResourceMark rm;
log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
}
if (klass->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(klass);
// Link the class to cause the bytecodes to be rewritten and the
// cpcache to be created. The linking is done as soon as classes
// are loaded in order that the related data structures (klass and
// cpCache) are located together.
try_link_class(ik, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
}
class_count++;
while (parser.parse_one_line()) {
Klass* klass = parser.load_current_class(THREAD);
if (HAS_PENDING_EXCEPTION) {
if (klass == NULL &&
(PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
// print a warning only when the pending exception is class not found
tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
}
CLEAR_PENDING_EXCEPTION;
}
if (klass != NULL) {
if (log_is_enabled(Trace, cds)) {
ResourceMark rm;
log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
}
if (klass->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(klass);
// Link the class to cause the bytecodes to be rewritten and the
// cpcache to be created. The linking is done as soon as classes
// are loaded in order that the related data structures (klass and
// cpCache) are located together.
try_link_class(ik, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
}
class_count++;
}
}
return class_count;
}
@ -1994,21 +1957,6 @@ void MetaspaceShared::initialize_shared_spaces() {
// The rest of the data is now stored in the RW region
buffer = mapinfo->read_only_tables_start();
int sharedDictionaryLen = *(intptr_t*)buffer;
buffer += sizeof(intptr_t);
int number_of_entries = *(intptr_t*)buffer;
buffer += sizeof(intptr_t);
SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
sharedDictionaryLen,
number_of_entries);
buffer += sharedDictionaryLen;
// The following data are the linked list elements
// (HashtableEntry objects) for the shared dictionary table.
int len = *(intptr_t*)buffer; // skip over shared dictionary entries
buffer += sizeof(intptr_t);
buffer += len;
// Verify various attributes of the archive, plus initialize the
// shared string/symbol tables
@ -2027,7 +1975,7 @@ void MetaspaceShared::initialize_shared_spaces() {
if (PrintSharedArchiveAndExit) {
if (PrintSharedDictionary) {
tty->print_cr("\nShared classes:\n");
SystemDictionary::print_shared(tty);
SystemDictionaryShared::print_on(tty);
}
if (_archive_loading_failed) {
tty->print_cr("archive is invalid");

@ -107,12 +107,19 @@ class MetaspaceShared : AllStatic {
static void post_initialize(TRAPS) NOT_CDS_RETURN;
// Delta of this object from the bottom of the archive.
static uintx object_delta(void* obj) {
static uintx object_delta_uintx(void* obj) {
assert(DumpSharedSpaces, "supported only for dumping");
assert(shared_rs()->contains(obj), "must be");
address base_address = address(shared_rs()->base());
uintx delta = address(obj) - base_address;
return delta;
uintx deltax = address(obj) - base_address;
return deltax;
}
static u4 object_delta_u4(void* obj) {
// offset is guaranteed to be less than MAX_SHARED_DELTA in DumpRegion::expand_top_to()
uintx deltax = object_delta_uintx(obj);
guarantee(deltax <= MAX_SHARED_DELTA, "must be 32-bit offset");
return (u4)deltax;
}
static void set_archive_loading_failed() {

@ -422,17 +422,22 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, unsigned kind, Klass
_static_field_size(parser.static_field_size()),
_nonstatic_oop_map_size(nonstatic_oop_map_size(parser.total_oop_map_count())),
_itable_len(parser.itable_size()),
_reference_type(parser.reference_type()) {
set_vtable_length(parser.vtable_size());
set_kind(kind);
set_access_flags(parser.access_flags());
set_is_unsafe_anonymous(parser.is_unsafe_anonymous());
set_layout_helper(Klass::instance_layout_helper(parser.layout_size(),
_reference_type(parser.reference_type())
{
set_vtable_length(parser.vtable_size());
set_kind(kind);
set_access_flags(parser.access_flags());
set_is_unsafe_anonymous(parser.is_unsafe_anonymous());
set_layout_helper(Klass::instance_layout_helper(parser.layout_size(),
false));
assert(NULL == _methods, "underlying memory not zeroed?");
assert(is_instance_klass(), "is layout incorrect?");
assert(size_helper() == parser.layout_size(), "incorrect size_helper?");
assert(NULL == _methods, "underlying memory not zeroed?");
assert(is_instance_klass(), "is layout incorrect?");
assert(size_helper() == parser.layout_size(), "incorrect size_helper?");
if (DumpSharedSpaces) {
SystemDictionaryShared::init_dumptime_info(this);
}
}
void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
@ -579,6 +584,10 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, annotations());
}
set_annotations(NULL);
if (DumpSharedSpaces) {
SystemDictionaryShared::remove_dumptime_info(this);
}
}
bool InstanceKlass::should_be_initialized() const {

@ -176,8 +176,7 @@ private:
// Flags of the current shared class.
u2 _shared_class_flags;
enum {
_has_raw_archived_mirror = 1,
_has_signer_and_not_archived = 1 << 2
_has_raw_archived_mirror = 1
};
#endif
// The _archived_mirror is set at CDS dump time pointing to the cached mirror
@ -314,15 +313,6 @@ protected:
CDS_ONLY(return (_shared_class_flags & _has_raw_archived_mirror) != 0;)
NOT_CDS(return false;)
}
#if INCLUDE_CDS
void set_has_signer_and_not_archived() {
_shared_class_flags |= _has_signer_and_not_archived;
}
bool has_signer_and_not_archived() const {
assert(DumpSharedSpaces, "dump time only");
return (_shared_class_flags & _has_signer_and_not_archived) != 0;
}
#endif // INCLUDE_CDS
// Obtain the module or package for this class
virtual ModuleEntry* module() const = 0;

@ -404,6 +404,8 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
/* Memory */ \
/**********/ \
\
static_field(MetaspaceObj, _shared_metaspace_base, void*) \
static_field(MetaspaceObj, _shared_metaspace_top, void*) \
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \
@ -460,7 +462,6 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
/* SystemDictionary */ \
/********************/ \
\
static_field(SystemDictionary, _shared_dictionary, Dictionary*) \
static_field(SystemDictionary, _system_loader_lock_obj, oop) \
static_field(SystemDictionary, WK_KLASS(Object_klass), InstanceKlass*) \
static_field(SystemDictionary, WK_KLASS(String_klass), InstanceKlass*) \

@ -33,7 +33,6 @@
#include "classfile/stringTable.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
@ -99,11 +98,7 @@ template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_n
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
if (NULL != _buckets) {
// Don't delete the buckets in the shared space. They aren't
// allocated by os::malloc
if (!MetaspaceShared::is_in_shared_metaspace(_buckets)) {
FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
}
FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
_buckets = NULL;
}
}
@ -137,47 +132,6 @@ template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkCont
}
Atomic::add(-context->_num_removed, &_number_of_entries);
}
// Copy the table to the shared space.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
size_t bytes = 0;
bytes += sizeof(intptr_t); // len
for (int i = 0; i < _table_size; ++i) {
for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
*p != NULL;
p = (*p)->next_addr()) {
bytes += entry_size();
}
}
return bytes;
}
// Dump the hash table entries (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
intptr_t *plen = (intptr_t*)(top);
top += sizeof(*plen);
int i;
for (i = 0; i < _table_size; ++i) {
for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
*p != NULL;
p = (*p)->next_addr()) {
*p = (BasicHashtableEntry<F>*)memcpy(top, (void*)*p, entry_size());
top += entry_size();
}
}
*plen = (char*)(top) - (char*)plen - sizeof(*plen);
assert(top == end, "count_bytes_for_table is wrong");
// Set the shared bit.
for (i = 0; i < _table_size; ++i) {
for (BasicHashtableEntry<F>* p = bucket(i); p != NULL; p = p->next()) {
p->set_shared();
}
}
}
// For oops and Strings the size of the literal is interesting. For other types, nobody cares.
static int literal_size(ConstantPool*) { return 0; }
@ -297,34 +251,6 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print_table_statistics(outp
st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
}
// Dump the hash table buckets.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
size_t bytes = 0;
bytes += sizeof(intptr_t); // len
bytes += sizeof(intptr_t); // _number_of_entries
bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets
return bytes;
}
// Dump the buckets (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
intptr_t len = _table_size * sizeof(HashtableBucket<F>);
*(intptr_t*)(top) = len;
top += sizeof(intptr_t);
*(intptr_t*)(top) = _number_of_entries;
top += sizeof(intptr_t);
_buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
top += len;
assert(top == end, "count_bytes_for_buckets is wrong");
}
#ifndef PRODUCT
template <class T> void print_literal(T l) {
l->print();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -147,12 +147,6 @@ public:
BasicHashtable(int table_size, int entry_size,
HashtableBucket<F>* buckets, int number_of_entries);
// Sharing support.
size_t count_bytes_for_buckets();
size_t count_bytes_for_table();
void copy_buckets(char* top, char* end);
void copy_table(char* top, char* end);
// Bucket handling
int hash_to_index(unsigned int full_hash) const {
int h = full_hash % _table_size;

@ -0,0 +1,56 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class MetaspaceObj {
private static Address sharedMetaspaceBaseAddr;
private static Address sharedMetaspaceTopAddr;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("MetaspaceObj");
sharedMetaspaceBaseAddr = type.getAddressField("_shared_metaspace_base").getStaticFieldAddress();
sharedMetaspaceTopAddr = type.getAddressField("_shared_metaspace_top").getStaticFieldAddress();
}
public static boolean isShared(Address addr) {
Address base = sharedMetaspaceBaseAddr.getAddressAt(0);
Address top = sharedMetaspaceTopAddr. getAddressAt(0);
return base.lessThanOrEqual(addr) && addr.lessThan(top);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,6 @@ import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class SystemDictionary {
private static AddressField sharedDictionaryField;
private static sun.jvm.hotspot.types.OopField javaSystemLoaderField;
private static AddressField objectKlassField;
@ -54,7 +53,6 @@ public class SystemDictionary {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("SystemDictionary");
sharedDictionaryField = type.getAddressField("_shared_dictionary");
javaSystemLoaderField = type.getOopField("_java_system_loader");
objectKlassField = type.getAddressField(WK_KLASS("Object_klass"));
@ -76,11 +74,6 @@ public class SystemDictionary {
return (kname+"_knum");
}
public Dictionary sharedDictionary() {
Address tmp = sharedDictionaryField.getValue();
return (Dictionary) VMObjectFactory.newObject(Dictionary.class, tmp);
}
// few well known classes -- not all are added here.
// add more if needed.
public static InstanceKlass getThreadKlass() {

@ -313,27 +313,6 @@ public class InstanceKlass extends Klass {
return shouldStoreFingerprint() || isShared();
}
public boolean isShared() {
  VM vm = VM.getVM();
  if (!vm.isSharingEnabled()) {
    return false;
  }
  // This is not the same implementation as the C++ function MetaspaceObj::is_shared()
  //     bool MetaspaceObj::is_shared() const {
  //       return MetaspaceShared::is_in_shared_space(this);
  //     }
  // MetaspaceShared::is_in_shared_space is complicated and hard to emulate in
  // Java code, so instead we consult the shared dictionary. This therefore works
  // only for shared InstanceKlass objects, not for other kinds of MetaspaceObj
  // in the CDS shared archive.
  Dictionary shared = vm.getSystemDictionary().sharedDictionary();
  return shared != null && shared.contains(this);
}
public static long getHeaderSize() { return headerSize; }
public short getFieldAccessFlags(int index) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@ package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
@ -87,4 +88,12 @@ abstract public class Metadata extends VMObject {
public void dumpReplayData(PrintStream out) {
out.println("# Unknown Metadata");
}
// A Metadata object is shared iff sharing is enabled in the target VM and its
// address falls inside the CDS shared metaspace region.
public boolean isShared() {
  return VM.getVM().isSharingEnabled() && MetaspaceObj.isShared(getAddress());
}
}

@ -61,7 +61,7 @@ public class CDSandJFR {
String appJar = ClassFileInstaller.getJarPath("CDSandJFR.jar");
OutputAnalyzer output;
output = TestCommon.testDump(appJar, TestCommon.list(classes));
TestCommon.checkDump(output, "Skipping JFR event class jdk/jfr/");
TestCommon.checkDump(output, "Skipping jdk/jfr/Event: JFR event class");
output = TestCommon.exec(appJar,
"-XX:StartFlightRecording=dumponexit=true",

@ -46,7 +46,7 @@ public class SignedJar {
String signedJar = TestCommon.getTestJar("signed_hello.jar");
OutputAnalyzer output;
output = TestCommon.dump(signedJar, TestCommon.list("Hello"));
TestCommon.checkDump(output, "Preload Warning: Skipping Hello from signed JAR");
TestCommon.checkDump(output, "Skipping Hello: Signed JAR");
// At runtime, the Hello class should be loaded from the jar file
// instead of from the shared archive since a class from a signed
@ -63,6 +63,6 @@ public class SignedJar {
// Test class exists in both signed JAR and unsigned JAR
String jars = signedJar + System.getProperty("path.separator") + unsignedJar;
output = TestCommon.dump(jars, TestCommon.list("Hello"));
TestCommon.checkDump(output, "Preload Warning: Skipping Hello from signed JAR");
TestCommon.checkDump(output, "Skipping Hello: Signed JAR");
}
}