8072061: Automatically determine optimal sizes for the CDS regions

See new C++ class MetaspaceClosure.

Reviewed-by: coleenp, jiangli, mseledtsov
This commit is contained in:
Ioi Lam 2017-08-02 18:06:38 -07:00
parent 4b983a51fd
commit f7f193ae71
82 changed files with 2010 additions and 1622 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,13 +65,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -67,13 +67,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,13 +94,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,13 +65,11 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
// we were running Java code when SIGPROF came in
if (isInJava) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,13 +66,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,13 +78,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr,
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, frame::unpatchable, addr.pc());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,13 +70,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr,
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
// If sp and fp are nonsense just leave them out

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,13 +73,11 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif
frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {

View File

@ -397,6 +397,7 @@ class ClassLoader: AllStatic {
static int compute_Object_vtable();
static ClassPathEntry* classpath_entry(int n) {
assert(n >= 0 && n < _num_entries, "sanity");
if (n == 0) {
assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
return ClassLoader::_jrt_entry;
@ -415,6 +416,10 @@ class ClassLoader: AllStatic {
}
}
static int number_of_classpath_entries() {
return _num_entries;
}
static bool is_in_patch_mod_entries(Symbol* module_name);
#if INCLUDE_CDS

View File

@ -726,7 +726,6 @@ bool ClassLoaderData::is_builtin_class_loader_data() const {
}
Metaspace* ClassLoaderData::metaspace_non_null() {
assert(!DumpSharedSpaces, "wrong metaspace!");
// If the metaspace has not been allocated, create a new one. Might want
// to create smaller arena for Reflection class loaders also.
// The reason for the delayed allocation is because some class loaders are
@ -1315,37 +1314,6 @@ void ClassLoaderDataGraph::post_class_unload_events() {
#endif
}
// CDS support
// Global metaspaces for writing information to the shared archive. When
// application CDS is supported, we may need one per metaspace, so this
// sort of looks like it.
Metaspace* ClassLoaderData::_ro_metaspace = NULL;
Metaspace* ClassLoaderData::_rw_metaspace = NULL;
static bool _shared_metaspaces_initialized = false;
// Initialize shared metaspaces (change to call from somewhere not lazily)
void ClassLoaderData::initialize_shared_metaspaces() {
assert(DumpSharedSpaces, "only use this for dumping shared spaces");
assert(this == ClassLoaderData::the_null_class_loader_data(),
"only supported for null loader data for now");
assert (!_shared_metaspaces_initialized, "only initialize once");
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
_ro_metaspace = new Metaspace(_metaspace_lock, Metaspace::ROMetaspaceType);
_rw_metaspace = new Metaspace(_metaspace_lock, Metaspace::ReadWriteMetaspaceType);
_shared_metaspaces_initialized = true;
}
Metaspace* ClassLoaderData::ro_metaspace() {
assert(_ro_metaspace != NULL, "should already be initialized");
return _ro_metaspace;
}
Metaspace* ClassLoaderData::rw_metaspace() {
assert(_rw_metaspace != NULL, "should already be initialized");
return _rw_metaspace;
}
ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
: _next_klass(NULL) {
ClassLoaderData* cld = ClassLoaderDataGraph::_head;

View File

@ -314,9 +314,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
_the_null_class_loader_data = new ClassLoaderData(Handle(), false, Dependencies());
ClassLoaderDataGraph::_head = _the_null_class_loader_data;
assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
if (DumpSharedSpaces) {
_the_null_class_loader_data->initialize_shared_metaspaces();
}
}
bool is_the_null_class_loader_data() const {
@ -387,11 +384,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
static ClassLoaderData* anonymous_class_loader_data(oop loader, TRAPS);
static void print_loader(ClassLoaderData *loader_data, outputStream *out);
// CDS support
Metaspace* ro_metaspace();
Metaspace* rw_metaspace();
void initialize_shared_metaspaces();
TRACE_DEFINE_TRACE_ID_METHODS;
};

View File

@ -48,8 +48,6 @@ CompactHashtableWriter::CompactHashtableWriter(int num_buckets,
_buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
}
stats->bucket_count = _num_buckets;
stats->bucket_bytes = (_num_buckets + 1) * (sizeof(u4));
_stats = stats;
_compact_buckets = NULL;
_compact_entries = NULL;
@ -91,13 +89,13 @@ void CompactHashtableWriter::allocate_table() {
"Too many entries.");
}
Thread* THREAD = VMThread::vm_thread();
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
_compact_buckets = MetadataFactory::new_array<u4>(loader_data, _num_buckets + 1, THREAD);
_compact_entries = MetadataFactory::new_array<u4>(loader_data, entries_space, THREAD);
_compact_buckets = MetaspaceShared::new_ro_array<u4>(_num_buckets + 1);
_compact_entries = MetaspaceShared::new_ro_array<u4>(entries_space);
_stats->bucket_count = _num_buckets;
_stats->bucket_bytes = _compact_buckets->size() * BytesPerWord;
_stats->hashentry_count = _num_entries;
_stats->hashentry_bytes = entries_space * sizeof(u4);
_stats->hashentry_bytes = _compact_entries->size() * BytesPerWord;
}
// Write the compact table's buckets
@ -177,12 +175,11 @@ void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table
// Customization for dumping Symbol and String tables
void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
address base_address = address(MetaspaceShared::shared_rs()->base());
uintx deltax = address(symbol) - base_address;
// The symbols are in RO space, which is smaler than MAX_SHARED_DELTA.
// The assert below is just to be extra cautious.
assert(deltax <= MAX_SHARED_DELTA, "the delta is too large to encode");
uintx deltax = MetaspaceShared::object_delta(symbol);
// When the symbols are stored into the archive, we already check that
// they won't be more than MAX_SHARED_DELTA from the base address, or
// else the dumping would have been aborted.
assert(deltax <= MAX_SHARED_DELTA, "must not be");
u4 delta = u4(deltax);
CompactHashtableWriter::add(hash, delta);
@ -243,7 +240,6 @@ bool SimpleCompactHashtable::exists(u4 value) {
template <class I>
inline void SimpleCompactHashtable::iterate(const I& iterator) {
assert(!DumpSharedSpaces, "run-time only");
for (u4 i = 0; i < _bucket_count; i++) {
u4 bucket_info = _buckets[i];
u4 bucket_offset = BUCKET_OFFSET(bucket_info);

View File

@ -32,6 +32,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@ -241,6 +242,20 @@ void Dictionary::all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
}
}
// Used to scan and relocate the classes during CDS archive dump.
void Dictionary::classes_do(MetaspaceClosure* it) {
assert(DumpSharedSpaces, "dump-time only");
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
it->push(probe->klass_addr());
((SharedDictionaryEntry*)probe)->metaspace_pointers_do(it);
}
}
}
// Add a loaded class to the dictionary.
// Readers of the SystemDictionary aren't always locked, so _buckets
@ -342,7 +357,7 @@ bool Dictionary::is_valid_protection_domain(int index, unsigned int hash,
}
void Dictionary::reorder_dictionary() {
void Dictionary::reorder_dictionary_for_sharing() {
// Copy all the dictionary entries into a single master list.

View File

@ -84,6 +84,7 @@ public:
void classes_do(void f(InstanceKlass*));
void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
void all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
void classes_do(MetaspaceClosure* it);
void unlink(BoolObjectClosure* is_alive);
void remove_classes_in_error_state();
@ -101,7 +102,7 @@ public:
Handle protection_domain, TRAPS);
// Sharing support
void reorder_dictionary();
void reorder_dictionary_for_sharing();
void print_on(outputStream* st) const;
void verify();
@ -142,6 +143,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
void add_protection_domain(Dictionary* dict, Handle protection_domain);
InstanceKlass* instance_klass() const { return literal(); }
InstanceKlass** klass_addr() { return (InstanceKlass**)literal_addr(); }
DictionaryEntry* next() const {
return (DictionaryEntry*)HashtableEntry<InstanceKlass*, mtClass>::next();
@ -300,9 +302,6 @@ public:
void methods_do(void f(Method*));
// Sharing support
void reorder_dictionary();
void verify();
};
#endif // SHARE_VM_CLASSFILE_DICTIONARY_HPP

View File

@ -74,7 +74,7 @@ InstanceKlass* KlassFactory::check_shared_class_file_load_hook(
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
ClassFileStream* stream = new ClassFileStream(ptr,
end_ptr - ptr,
ent == NULL ? NULL : ent->_name,
ent == NULL ? NULL : ent->name(),
ClassFileStream::verify);
ClassFileParser parser(stream,
class_name,
@ -229,8 +229,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
len = stream->length();
bytes = stream->buffer();
}
p = (JvmtiCachedClassFileData*)MetaspaceShared::optional_data_space_alloc(
offset_of(JvmtiCachedClassFileData, data) + len);
p = (JvmtiCachedClassFileData*)os::malloc(offset_of(JvmtiCachedClassFileData, data) + len, mtInternal);
p->length = len;
memcpy(p->data, bytes, len);
result->set_archived_class_data(p);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,13 +52,7 @@ public:
}
static void update_shared_classpath(ClassPathEntry *cpe,
SharedClassPathEntry* ent,
time_t timestamp,
long filesize, TRAPS) {
ent->_timestamp = timestamp;
ent->_filesize = filesize;
}
SharedClassPathEntry* ent, TRAPS) {}
static void initialize(TRAPS) {}
inline static bool is_shared_boot_class(Klass* klass) {

View File

@ -33,6 +33,7 @@
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@ -731,6 +732,9 @@ bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
// add to the compact table
writer->add(hash, new_s);
MetaspaceShared::relocate_klass_ptr(new_s);
MetaspaceShared::relocate_klass_ptr(new_v);
}
}
@ -740,35 +744,33 @@ bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
return true;
}
void StringTable::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
size_t* space_size) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
void StringTable::write_to_archive(GrowableArray<MemRegion> *string_space) {
#if INCLUDE_CDS
_shared_table.reset();
if (soc->writing()) {
if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
log_info(cds)(
"Shared strings are excluded from the archive as UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required."
"Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
BOOL_TO_STR(UseCompressedClassPointers));
} else {
int num_buckets = the_table()->number_of_entries() /
SharedSymbolTableBucketSize;
// calculation of num_buckets can result in zero buckets, we need at least one
CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
&MetaspaceShared::stats()->string);
"Shared strings are excluded from the archive as UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required."
"Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
BOOL_TO_STR(UseCompressedClassPointers));
} else {
int num_buckets = the_table()->number_of_entries() /
SharedSymbolTableBucketSize;
// calculation of num_buckets can result in zero buckets, we need at least one
CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
&MetaspaceShared::stats()->string);
// Copy the interned strings into the "string space" within the java heap
if (copy_shared_string(string_space, &writer)) {
for (int i = 0; i < string_space->length(); i++) {
*space_size += string_space->at(i).byte_size();
}
writer.dump(&_shared_table);
}
// Copy the interned strings into the "string space" within the java heap
if (copy_shared_string(string_space, &writer)) {
writer.dump(&_shared_table);
}
}
#endif
}
void StringTable::serialize(SerializeClosure* soc) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
_shared_table.set_type(CompactHashtable<oop, char>::_string_table);
_shared_table.serialize(soc);

View File

@ -162,8 +162,8 @@ public:
static void shared_oops_do(OopClosure* f);
static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactStringTableWriter* ch_table);
static void serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
size_t* space_size);
static void write_to_archive(GrowableArray<MemRegion> *string_space);
static void serialize(SerializeClosure* soc);
// Rehash the symbol table if it gets out of balance
static void rehash_table();

View File

@ -32,6 +32,7 @@
#include "gc/shared/gcLocker.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@ -57,9 +58,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
Symbol* sym;
if (DumpSharedSpaces) {
// Allocate all symbols to CLD shared metaspace
sym = new (len, ClassLoaderData::the_null_class_loader_data(), THREAD) Symbol(name, len, PERM_REFCOUNT);
} else if (c_heap) {
c_heap = false;
}
if (c_heap) {
// refcount starts as 1
sym = new (len, THREAD) Symbol(name, len, 1);
assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
@ -95,6 +96,18 @@ void SymbolTable::symbols_do(SymbolClosure *cl) {
}
}
void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
assert(DumpSharedSpaces, "called only during dump time");
const int n = the_table()->table_size();
for (int i = 0; i < n; i++) {
for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
p != NULL;
p = p->next()) {
it->push(p->literal_addr());
}
}
}
int SymbolTable::_symbols_removed = 0;
int SymbolTable::_symbols_counted = 0;
volatile int SymbolTable::_parallel_claimed_idx = 0;
@ -568,10 +581,10 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
}
}
void SymbolTable::serialize(SerializeClosure* soc) {
void SymbolTable::write_to_archive() {
#if INCLUDE_CDS
_shared_table.reset();
if (soc->writing()) {
_shared_table.reset();
int num_buckets = the_table()->number_of_entries() /
SharedSymbolTableBucketSize;
CompactSymbolTableWriter writer(num_buckets,
@ -587,19 +600,22 @@ void SymbolTable::serialize(SerializeClosure* soc) {
}
writer.dump(&_shared_table);
}
_shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
_shared_table.serialize(soc);
if (soc->writing()) {
// Verify table is correct
Symbol* sym = vmSymbols::java_lang_Object();
const char* name = (const char*)sym->bytes();
int len = sym->utf8_length();
unsigned int hash = hash_symbol(name, len);
assert(sym == _shared_table.lookup(name, hash, len), "sanity");
#endif
}
void SymbolTable::serialize(SerializeClosure* soc) {
#if INCLUDE_CDS
_shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
_shared_table.serialize(soc);
if (soc->writing()) {
// Sanity. Make sure we don't use the shared table at dump time
_shared_table.reset();
}

View File

@ -213,6 +213,7 @@ public:
// iterate over symbols
static void symbols_do(SymbolClosure *cl);
static void metaspace_pointers_do(MetaspaceClosure* it);
// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
@ -255,6 +256,7 @@ public:
static void read(const char* filename, TRAPS);
// Sharing
static void write_to_archive();
static void serialize(SerializeClosure* soc);
static u4 encode_shared(Symbol* sym);
static Symbol* decode_shared(u4 offset);

View File

@ -49,6 +49,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
@ -1952,6 +1953,18 @@ void SystemDictionary::oops_do(OopClosure* f) {
ResolvedMethodTable::oops_do(f);
}
// CDS: scan and relocate all classes in the system dictionary.
void SystemDictionary::classes_do(MetaspaceClosure* it) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(it);
}
// CDS: scan and relocate all classes referenced by _well_known_klasses[].
void SystemDictionary::well_known_klasses_do(MetaspaceClosure* it) {
for (int id = FIRST_WKID; id < WKID_LIMIT; id++) {
it->push(well_known_klass_addr((WKID)id));
}
}
void SystemDictionary::methods_do(void f(Method*)) {
// Walk methods in loaded classes
ClassLoaderDataGraph::methods_do(f);
@ -2793,18 +2806,23 @@ ProtectionDomainCacheEntry* SystemDictionary::cache_get(Handle protection_domain
return _pd_cache_table->get(protection_domain);
}
void SystemDictionary::reorder_dictionary() {
ClassLoaderData::the_null_class_loader_data()->dictionary()->reorder_dictionary();
void SystemDictionary::reorder_dictionary_for_sharing() {
ClassLoaderData::the_null_class_loader_data()->dictionary()->reorder_dictionary_for_sharing();
}
size_t SystemDictionary::count_bytes_for_buckets() {
return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_buckets();
}
void SystemDictionary::copy_buckets(char** top, char* end) {
size_t SystemDictionary::count_bytes_for_table() {
return ClassLoaderData::the_null_class_loader_data()->dictionary()->count_bytes_for_table();
}
void SystemDictionary::copy_buckets(char* top, char* end) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_buckets(top, end);
}
void SystemDictionary::copy_table(char** top, char* end) {
void SystemDictionary::copy_table(char* top, char* end) {
ClassLoaderData::the_null_class_loader_data()->dictionary()->copy_table(top, end);
}

View File

@ -353,7 +353,9 @@ public:
Handle class_loader,
TRAPS);
static void classes_do(MetaspaceClosure* it);
// Iterate over all methods in all klasses
static void methods_do(void f(Method*));
// Garbage collection support
@ -382,9 +384,11 @@ public:
public:
// Sharing support.
static void reorder_dictionary();
static void copy_buckets(char** top, char* end);
static void copy_table(char** top, char* end);
static void reorder_dictionary_for_sharing();
static size_t count_bytes_for_buckets();
static size_t count_bytes_for_table();
static void copy_buckets(char* top, char* end);
static void copy_table(char* top, char* end);
static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries);
// Printing
@ -442,6 +446,7 @@ public:
assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
return &_well_known_klasses[id];
}
static void well_known_klasses_do(MetaspaceClosure* it);
// Local definition for direct access to the private array:
#define WK_KLASS(name) _well_known_klasses[SystemDictionary::WK_KLASS_ENUM_NAME(name)]

View File

@ -91,4 +91,9 @@ public:
TRAPS) {}
};
class SharedDictionaryEntry : public DictionaryEntry {
public:
void metaspace_pointers_do(MetaspaceClosure* it) {}
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

View File

@ -26,6 +26,7 @@
#include "classfile/vmSymbols.hpp"
#include "compiler/compilerDirectives.hpp"
#include "memory/oopFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvm.h"
#include "runtime/handles.inline.hpp"
@ -183,6 +184,15 @@ void vmSymbols::symbols_do(SymbolClosure* f) {
}
}
void vmSymbols::metaspace_pointers_do(MetaspaceClosure *it) {
for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
it->push(&_symbols[index]);
}
for (int i = 0; i < T_VOID+1; i++) {
it->push(&_type_signatures[i]);
}
}
void vmSymbols::serialize(SerializeClosure* soc) {
soc->do_region((u_char*)&_symbols[FIRST_SID],
(SID_LIMIT - FIRST_SID) * sizeof(_symbols[0]));

View File

@ -1511,6 +1511,7 @@ class vmSymbols: AllStatic {
// Sharing support
static void symbols_do(SymbolClosure* f);
static void metaspace_pointers_do(MetaspaceClosure *it);
static void serialize(SerializeClosure* soc);
static Symbol* type_signature(BasicType t) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,10 +47,10 @@ void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); ret
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size, bool read_only,
size_t word_size,
MetaspaceObj::Type type, TRAPS) throw() {
// Klass has it's own operator new
return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
return Metaspace::allocate(loader_data, word_size, type, THREAD);
}
bool MetaspaceObj::is_shared() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -239,6 +239,7 @@ class _ValueObj {
//
class ClassLoaderData;
class MetaspaceClosure;
class MetaspaceObj {
public:
@ -260,9 +261,8 @@ class MetaspaceObj {
f(MethodData) \
f(ConstantPool) \
f(ConstantPoolCache) \
f(Annotation) \
f(MethodCounters) \
f(Deallocated)
f(Annotations) \
f(MethodCounters)
#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
@ -294,10 +294,15 @@ class MetaspaceObj {
}
void* operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size, bool read_only,
size_t word_size,
Type type, Thread* thread) throw();
// can't use TRAPS from this header file.
void operator delete(void* p) { ShouldNotCallThis(); }
// Declare a *static* method with the same signature in any subclass of MetaspaceObj
// that should be read-only by default. See symbol.hpp for an example. This function
// is used by the templates in metaspaceClosure.hpp
static bool is_read_only_by_default() { return false; }
};
// Base class for classes that constitute name spaces.

View File

@ -38,6 +38,8 @@
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.hpp"
#include "prims/jvm.h"
@ -196,114 +198,97 @@ void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment
get_header_version(_jvm_ident);
}
void FileMapInfo::allocate_classpath_entry_table() {
int bytes = 0;
int count = 0;
char* strptr = NULL;
char* strptr_max = NULL;
Thread* THREAD = Thread::current();
void SharedClassPathEntry::init(const char* name, TRAPS) {
_timestamp = 0;
_filesize = 0;
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
for (int pass=0; pass<2; pass++) {
// Process the modular java runtime image first
ClassPathEntry* jrt_entry = ClassLoader::get_jrt_entry();
assert(jrt_entry != NULL,
"No modular java runtime image present when allocating the CDS classpath entry table");
const char *name = jrt_entry->name();
int name_bytes = (int)(strlen(name) + 1);
if (pass == 0) {
count++;
bytes += (int)entry_size;
bytes += name_bytes;
log_info(class, path)("add main shared path for modular java runtime image %s", name);
} else {
// The java runtime image is always in slot 0 on the shared class path.
SharedClassPathEntry* ent = shared_classpath(0);
struct stat st;
if (os::stat(name, &st) == 0) {
ent->_timestamp = st.st_mtime;
ent->_filesize = st.st_size;
}
if (ent->_filesize == 0) {
// unknown
ent->_filesize = -2;
}
ent->_name = strptr;
assert(strptr + name_bytes <= strptr_max, "miscalculated buffer size");
strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
strptr += name_bytes;
}
// Walk the appended entries, which includes the entries added for the classpath.
ClassPathEntry *cpe = ClassLoader::classpath_entry(1);
// Since the java runtime image is always in slot 0 on the shared class path, the
// appended entries are started at slot 1 immediately after.
for (int cur_entry = 1 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
const char *name = cpe->name();
int name_bytes = (int)(strlen(name) + 1);
assert(!cpe->is_jrt(), "A modular java runtime image is present on the list of appended entries");
if (pass == 0) {
count ++;
bytes += (int)entry_size;
bytes += name_bytes;
log_info(class, path)("add main shared path (%s) %s", (cpe->is_jar_file() ? "jar" : "dir"), name);
} else {
SharedClassPathEntry* ent = shared_classpath(cur_entry);
if (cpe->is_jar_file()) {
struct stat st;
if (os::stat(name, &st) != 0) {
// The file/dir must exist, or it would not have been added
// into ClassLoader::classpath_entry().
//
// If we can't access a jar file in the boot path, then we can't
// make assumptions about where classes get loaded from.
FileMapInfo::fail_stop("Unable to open jar file %s.", name);
}
EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
} else {
struct stat st;
if (os::stat(name, &st) == 0) {
if ((st.st_mode & S_IFMT) == S_IFDIR) {
if (!os::dir_is_empty(name)) {
ClassLoader::exit_with_path_failure(
struct stat st;
if (os::stat(name, &st) == 0) {
if ((st.st_mode & S_IFMT) == S_IFDIR) {
if (!os::dir_is_empty(name)) {
ClassLoader::exit_with_path_failure(
"Cannot have non-empty directory in archived classpaths", name);
}
ent->_filesize = -1;
}
}
if (ent->_filesize == 0) {
// unknown
ent->_filesize = -2;
}
}
ent->_name = strptr;
if (strptr + name_bytes <= strptr_max) {
strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
strptr += name_bytes;
} else {
assert(0, "miscalculated buffer size");
}
}
_is_dir = true;
} else {
_is_dir = false;
_timestamp = st.st_mtime;
_filesize = st.st_size;
}
} else {
// The file/dir must exist, or it would not have been added
// into ClassLoader::classpath_entry().
//
// If we can't access a jar file in the boot path, then we can't
// make assumptions about where classes get loaded from.
FileMapInfo::fail_stop("Unable to open file %s.", name);
}
size_t len = strlen(name) + 1;
_name = MetadataFactory::new_array<char>(ClassLoaderData::the_null_class_loader_data(), (int)len, THREAD);
strcpy(_name->data(), name);
}
bool SharedClassPathEntry::validate() {
struct stat st;
const char* name = this->name();
bool ok = true;
log_info(class, path)("checking shared classpath entry: %s", name);
if (os::stat(name, &st) != 0) {
FileMapInfo::fail_continue("Required classpath entry does not exist: %s", name);
ok = false;
} else if (is_dir()) {
if (!os::dir_is_empty(name)) {
FileMapInfo::fail_continue("directory is not empty: %s", name);
ok = false;
}
} else if (is_jar_or_bootimage()) {
if (_timestamp != st.st_mtime ||
_filesize != st.st_size) {
ok = false;
if (PrintSharedArchiveAndExit) {
FileMapInfo::fail_continue(_timestamp != st.st_mtime ?
"Timestamp mismatch" :
"File size mismatch");
} else {
FileMapInfo::fail_continue("A jar/jimage file is not the one used while building"
" the shared archive file: %s", name);
}
}
}
return ok;
}
if (pass == 0) {
void SharedClassPathEntry::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_name);
it->push(&_manifest);
}
void FileMapInfo::allocate_classpath_entry_table() {
Thread* THREAD = Thread::current();
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
size_t entry_size = SharedClassUtil::shared_class_path_entry_size(); // assert ( should be 8 byte aligned??)
int num_entries = ClassLoader::number_of_classpath_entries();
size_t bytes = entry_size * num_entries;
_classpath_entry_table = MetadataFactory::new_array<u8>(loader_data, (int)(bytes + 7 / 8), THREAD);
_classpath_entry_table_size = num_entries;
_classpath_entry_size = entry_size;
assert(ClassLoader::get_jrt_entry() != NULL,
"No modular java runtime image present when allocating the CDS classpath entry table");
for (int i=0; i<num_entries; i++) {
ClassPathEntry *cpe = ClassLoader::classpath_entry(i);
const char* type = ((i == 0) ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
log_info(class, path)("add main shared path (%s) %s", type, cpe->name());
SharedClassPathEntry* ent = shared_classpath(i);
ent->init(cpe->name(), THREAD);
if (i > 0) { // No need to do jimage.
EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
strptr = (char*)(arr->data());
strptr_max = strptr + bytes;
SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
strptr += entry_size * count;
_classpath_entry_table_size = count;
_classpath_entry_table = table;
_classpath_entry_size = entry_size;
SharedClassUtil::update_shared_classpath(cpe, ent, THREAD);
}
}
}
@ -315,44 +300,19 @@ bool FileMapInfo::validate_classpath_entry_table() {
_classpath_entry_table = _header->_classpath_entry_table;
_classpath_entry_size = _header->_classpath_entry_size;
_classpath_entry_table_size = _header->_classpath_entry_table_size;
for (int i=0; i<count; i++) {
SharedClassPathEntry* ent = shared_classpath(i);
struct stat st;
const char* name = ent->_name;
bool ok = true;
log_info(class, path)("checking shared classpath entry: %s", name);
if (os::stat(name, &st) != 0) {
fail_continue("Required classpath entry does not exist: %s", name);
ok = false;
} else if (ent->is_dir()) {
if (!os::dir_is_empty(name)) {
fail_continue("directory is not empty: %s", name);
ok = false;
}
} else if (ent->is_jar_or_bootimage()) {
if (ent->_timestamp != st.st_mtime ||
ent->_filesize != st.st_size) {
ok = false;
if (PrintSharedArchiveAndExit) {
fail_continue(ent->_timestamp != st.st_mtime ?
"Timestamp mismatch" :
"File size mismatch");
} else {
fail_continue("A jar/jimage file is not the one used while building"
" the shared archive file: %s", name);
}
}
}
if (ok) {
if (shared_classpath(i)->validate()) {
log_info(class, path)("ok");
} else if (!PrintSharedArchiveAndExit) {
_validating_classpath_entry_table = false;
_classpath_entry_table = NULL;
_classpath_entry_table_size = 0;
return false;
}
}
_classpath_entry_table_size = _header->_classpath_entry_table_size;
_validating_classpath_entry_table = false;
return true;
}
@ -390,7 +350,7 @@ bool FileMapInfo::init_from_file(int fd) {
size_t len = lseek(fd, 0, SEEK_END);
struct FileMapInfo::FileMapHeader::space_info* si =
&_header->_space[MetaspaceShared::mc];
&_header->_space[MetaspaceShared::last_valid_region];
if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
fail_continue("The shared archive file has been truncated.");
return false;
@ -469,28 +429,16 @@ void FileMapInfo::write_header() {
}
// Dump shared spaces to file.
void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
align_file_position();
size_t used = space->used_bytes_slow(Metaspace::NonClassType);
size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
// Dump region to file.
void FileMapInfo::write_region(int region, char* base, size_t size,
size_t capacity, bool read_only,
bool allow_exec) {
bool read_only, bool allow_exec) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
if (_file_open) {
guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(6)
" bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(6),
log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08)
" bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08),
region, size, p2i(base), _file_offset);
} else {
si->_file_offset = _file_offset;
@ -506,7 +454,6 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
si->_addr._base = base;
}
si->_used = size;
si->_capacity = capacity;
si->_read_only = read_only;
si->_allow_exec = allow_exec;
si->_crc = ClassLoader::crc32(0, base, (jint)size);
@ -523,25 +470,62 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
// The non-empty portion of the first region is written into the archive as one string
// region. The rest are consecutive full GC regions if they exist, which can be written
// out in one chunk as another string region.
void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions) {
//
// Here's the mapping from (GrowableArray<MemRegion> *regions) -> (metaspace string regions).
// + We have 1 or more heap regions: r0, r1, r2 ..... rn
// + We have 2 metaspace string regions: s0 and s1
//
// If there's a single heap region (r0), then s0 == r0, and s1 is empty.
// Otherwise:
//
// "X" represented space that's occupied by heap objects.
// "_" represented unused spaced in the heap region.
//
//
// |r0 | r1 | r2 | ...... | rn |
// |XXXXXX|__ |XXXXX|XXXX|XXXXXXXX|XXXX|
// |<-s0->| |<- s1 ----------------->|
// ^^^
// |
// +-- unmapped space
void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions,
char** st0_start, char** st0_top, char** st0_end,
char** st1_start, char** st1_top, char** st1_end) {
*st0_start = *st0_top = *st0_end = NULL;
*st1_start = *st1_top = *st1_end = NULL;
assert(MetaspaceShared::max_strings == 2, "this loop doesn't work for any other value");
for (int i = MetaspaceShared::first_string;
i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
char* start = NULL;
size_t size = 0;
if (regions->is_nonempty()) {
int len = regions->length();
if (len > 0) {
if (i == MetaspaceShared::first_string) {
MemRegion first = regions->first();
start = (char*)first.start();
size = first.byte_size();
*st0_start = start;
*st0_top = start + size;
if (len > 1) {
*st0_end = (char*)regions->at(1).start();
} else {
*st0_end = start + size;
}
} else {
int len = regions->length();
assert(i == MetaspaceShared::first_string + 1, "must be");
if (len > 1) {
start = (char*)regions->at(1).start();
size = (char*)regions->at(len - 1).end() - start;
*st1_start = start;
*st1_top = start + size;
*st1_end = start + size;
}
}
}
write_region(i, start, size, size, false, false);
log_info(cds)("String region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
i, p2i(start), p2i(start + size), size);
write_region(i, start, size, false, false);
}
}
@ -609,7 +593,7 @@ void FileMapInfo::close() {
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
int idx = 0;
int idx = MetaspaceShared::ro;
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[idx];
if (!si->_read_only) {
// the space is already readwrite so we are done
@ -639,10 +623,8 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
char* requested_addr = _header->region_addr(0);
size_t size = FileMapInfo::shared_spaces_size();
size_t size = FileMapInfo::core_spaces_size();
// Reserve the space first, then map otherwise map will go right over some
// other reserved memory (like the code cache).
@ -862,9 +844,16 @@ void FileMapInfo::assert_mark(bool check) {
}
}
void FileMapInfo::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_classpath_entry_table);
for (int i=0; i<_classpath_entry_table_size; i++) {
shared_classpath(i)->metaspace_pointers_do(it);
}
}
FileMapInfo* FileMapInfo::_current_info = NULL;
SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
Array<u8>* FileMapInfo::_classpath_entry_table = NULL;
int FileMapInfo::_classpath_entry_table_size = 0;
size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
bool FileMapInfo::_validating_classpath_entry_table = false;
@ -890,11 +879,6 @@ bool FileMapInfo::initialize() {
if (!validate_header()) {
return false;
}
SharedReadOnlySize = _header->_space[0]._capacity;
SharedReadWriteSize = _header->_space[1]._capacity;
SharedMiscDataSize = _header->_space[2]._capacity;
SharedMiscCodeSize = _header->_space[3]._capacity;
return true;
}
@ -1001,9 +985,12 @@ bool FileMapInfo::is_in_shared_space(const void* p) {
return false;
}
// Check if a given address is within one of the shared regions (ro, rw, md, mc)
// Check if a given address is within one of the shared regions ( ro, rw, mc or md)
bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
assert((idx >= MetaspaceShared::ro) && (idx <= MetaspaceShared::mc), "invalid region index");
assert(idx == MetaspaceShared::ro ||
idx == MetaspaceShared::rw ||
idx == MetaspaceShared::mc ||
idx == MetaspaceShared::md, "invalid region index");
char* base = _header->region_addr(idx);
if (p >= base && p < base + _header->_space[idx]._used) {
return true;

View File

@ -42,24 +42,33 @@
static const int JVM_IDENT_MAX = 256;
class Metaspace;
class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
public:
const char *_name;
protected:
bool _is_dir;
time_t _timestamp; // jar/jimage timestamp, 0 if is directory or other
long _filesize; // jar/jimage file size, -1 if is directory, -2 if other
Array<char>* _name;
Array<u1>* _manifest;
public:
void init(const char* name, TRAPS);
void metaspace_pointers_do(MetaspaceClosure* it);
bool validate();
// The _timestamp only gets set for jar files and "modules" jimage.
bool is_jar_or_bootimage() {
return _timestamp != 0;
}
bool is_dir() {
return _filesize == -1;
bool is_dir() { return _is_dir; }
bool is_jrt() { return ClassLoader::is_jrt(name()); }
time_t timestamp() const { return _timestamp; }
long filesize() const { return _filesize; }
const char* name() const { return _name->data(); }
const char* manifest() const {
return (_manifest == NULL) ? NULL : (const char*)_manifest->data();
}
bool is_jrt() {
return ClassLoader::is_jrt(_name);
int manifest_size() const {
return (_manifest == NULL) ? 0 : _manifest->length();
}
};
@ -68,7 +77,7 @@ private:
friend class ManifestStream;
enum {
_invalid_version = -1,
_current_version = 2
_current_version = 3
};
bool _file_open;
@ -76,7 +85,7 @@ private:
size_t _file_offset;
private:
static SharedClassPathEntry* _classpath_entry_table;
static Array<u8>* _classpath_entry_table;
static int _classpath_entry_table_size;
static size_t _classpath_entry_size;
static bool _validating_classpath_entry_table;
@ -110,8 +119,11 @@ public:
int _narrow_klass_shift; // save narrow klass base and shift
address _narrow_klass_base;
char* _misc_data_patching_start;
char* _read_only_tables_start;
address _cds_i2i_entry_code_buffers;
size_t _cds_i2i_entry_code_buffers_size;
size_t _core_spaces_size; // number of bytes allocated by the core spaces
// (mc, md, ro, rw and od).
struct space_info {
int _crc; // crc checksum of the current space
@ -121,7 +133,6 @@ public:
intx _offset; // offset from the compressed oop encoding base, only used
// by string space
} _addr;
size_t _capacity; // for validity checking
size_t _used; // for setting space top on read
bool _read_only; // read only space?
bool _allow_exec; // executable code in space?
@ -158,7 +169,7 @@ public:
// loading failures during runtime.
int _classpath_entry_table_size;
size_t _classpath_entry_size;
SharedClassPathEntry* _classpath_entry_table;
Array<u8>* _classpath_entry_table;
char* region_addr(int idx);
@ -177,6 +188,7 @@ public:
bool init_from_file(int fd);
void align_file_position();
bool validate_header_impl();
static void metaspace_pointers_do(MetaspaceClosure* it);
public:
FileMapInfo();
@ -195,10 +207,11 @@ public:
uintx max_heap_size() { return _header->_max_heap_size; }
address narrow_klass_base() const { return _header->_narrow_klass_base; }
int narrow_klass_shift() const { return _header->_narrow_klass_shift; }
size_t space_capacity(int i) { return _header->_space[i]._capacity; }
struct FileMapHeader* header() { return _header; }
char* misc_data_patching_start() { return _header->_misc_data_patching_start; }
void set_misc_data_patching_start(char* p) { _header->_misc_data_patching_start = p; }
char* read_only_tables_start() { return _header->_read_only_tables_start; }
void set_read_only_tables_start(char* p) { _header->_read_only_tables_start = p; }
address cds_i2i_entry_code_buffers() {
return _header->_cds_i2i_entry_code_buffers;
@ -212,6 +225,8 @@ public:
void set_cds_i2i_entry_code_buffers_size(size_t s) {
_header->_cds_i2i_entry_code_buffers_size = s;
}
void set_core_spaces_size(size_t s) { _header->_core_spaces_size = s; }
size_t core_spaces_size() { return _header->_core_spaces_size; }
static FileMapInfo* current_info() {
CDS_ONLY(return _current_info;)
@ -225,10 +240,11 @@ public:
bool open_for_read();
void open_for_write();
void write_header();
void write_space(int i, Metaspace* space, bool read_only);
void write_region(int region, char* base, size_t size,
size_t capacity, bool read_only, bool allow_exec);
void write_string_regions(GrowableArray<MemRegion> *regions);
bool read_only, bool allow_exec);
void write_string_regions(GrowableArray<MemRegion> *regions,
char** s0_start, char** s0_top, char** s0_end,
char** s1_start, char** s1_top, char** s1_end);
void write_bytes(const void* buffer, int count);
void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i);
@ -255,29 +271,6 @@ public:
bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
void print_shared_spaces() NOT_CDS_RETURN;
// The ro+rw+md+mc spaces size
static size_t core_spaces_size() {
return align_up((SharedReadOnlySize + SharedReadWriteSize +
SharedMiscDataSize + SharedMiscCodeSize),
os::vm_allocation_granularity());
}
// The estimated optional space size.
//
// Currently the optional space only has archived class bytes.
// The core_spaces_size is the size of all class metadata, which is a good
// estimate of the total class bytes to be archived. Only the portion
// containing data is written out to the archive and mapped at runtime.
// There is no memory waste due to unused portion in optional space.
static size_t optional_space_size() {
return core_spaces_size();
}
// Total shared_spaces size includes the ro, rw, md, mc and od spaces
static size_t shared_spaces_size() {
return core_spaces_size() + optional_space_size();
}
// Stop CDS sharing and unmap CDS regions.
static void stop_sharing_and_unmap(const char* msg);
@ -288,13 +281,14 @@ public:
if (index < 0) {
return NULL;
}
char* p = (char*)_classpath_entry_table;
assert(index < _classpath_entry_table_size, "sanity");
char* p = (char*)_classpath_entry_table->data();
p += _classpath_entry_size * index;
return (SharedClassPathEntry*)p;
}
static const char* shared_classpath_name(int index) {
assert(index >= 0, "Sanity");
return shared_classpath(index)->_name;
return shared_classpath(index)->name();
}
static int get_number_of_share_classpaths() {

View File

@ -36,7 +36,7 @@ class MetadataFactory : AllStatic {
static Array<T>* new_array(ClassLoaderData* loader_data, int length, TRAPS) {
// The "true" argument is because all metadata arrays are read only when
// dumped to the shared archive
return new (loader_data, length, /*read_only*/true, THREAD) Array<T>(length);
return new (loader_data, length, THREAD) Array<T>(length);
}
template <typename T>
@ -48,48 +48,23 @@ class MetadataFactory : AllStatic {
return array;
}
template <typename T>
static Array<T>* new_writeable_array(ClassLoaderData* loader_data, int length, TRAPS) {
return new (loader_data, length, /*read_only*/false, THREAD) Array<T>(length);
}
template <typename T>
static Array<T>* new_writeable_array(ClassLoaderData* loader_data, int length, T value, TRAPS) {
Array<T>* array = new_writeable_array<T>(loader_data, length, CHECK_NULL);
for (int i = 0; i < length; i++) {
array->at_put(i, value);
}
return array;
}
template <typename T>
static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
if (data != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
assert(!data->is_shared(), "cannot deallocate array in shared spaces");
int size = data->size();
if (DumpSharedSpaces) {
loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
} else {
loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
}
loader_data->metaspace_non_null()->deallocate((MetaWord*)data, size, false);
}
}
// Deallocation method for metadata
template <class T>
static void free_metadata(ClassLoaderData* loader_data, T md) {
if (DumpSharedSpaces) {
// FIXME: the freeing code is buggy, especially when -Xlog:cds is enabled.
// Disable for now -- this means if you specify bad classes in your classlist you
// may have wasted space inside the archive.
return;
}
if (md != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
int size = md->size();
// Call metadata's deallocate function which will call deallocate fields
assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
assert(!md->on_stack(), "can't deallocate things on stack");
assert(!md->is_shared(), "cannot deallocate if in shared spaces");
md->deallocate_contents(loader_data);

View File

@ -65,6 +65,8 @@ MetaWord* last_allocated = 0;
size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;
DEBUG_ONLY(bool Metaspace::_frozen = false;)
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
ZeroIndex = 0,
@ -502,34 +504,8 @@ static bool should_commit_large_pages_when_reserving(size_t bytes) {
// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
assert_is_aligned(bytes, Metaspace::reserve_alignment());
#if INCLUDE_CDS
// This allocates memory with mmap. For DumpSharedspaces, try to reserve
// configurable address, generally at the top of the Java heap so other
// memory addresses don't conflict.
if (DumpSharedSpaces) {
bool large_pages = false; // No large pages when dumping the CDS archive.
char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
if (_rs.is_reserved()) {
assert(shared_base == 0 || _rs.base() == shared_base, "should match");
} else {
// Get a mmap region anywhere if the SharedBaseAddress fails.
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
}
if (!_rs.is_reserved()) {
vm_exit_during_initialization("Unable to allocate memory for shared space",
err_msg(SIZE_FORMAT " bytes.", bytes));
}
MetaspaceShared::initialize_shared_rs(&_rs);
} else
#endif
{
bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
}
bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
if (_rs.is_reserved()) {
assert(_rs.base() != NULL, "Catch if we get a NULL address");
@ -2148,8 +2124,6 @@ size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const
if (is_class()) {
switch (type) {
case Metaspace::BootMetaspaceType: requested = Metaspace::first_class_chunk_word_size(); break;
case Metaspace::ROMetaspaceType: requested = ClassSpecializedChunk; break;
case Metaspace::ReadWriteMetaspaceType: requested = ClassSpecializedChunk; break;
case Metaspace::AnonymousMetaspaceType: requested = ClassSpecializedChunk; break;
case Metaspace::ReflectionMetaspaceType: requested = ClassSpecializedChunk; break;
default: requested = ClassSmallChunk; break;
@ -2157,8 +2131,6 @@ size_t SpaceManager::get_initial_chunk_size(Metaspace::MetaspaceType type) const
} else {
switch (type) {
case Metaspace::BootMetaspaceType: requested = Metaspace::first_chunk_word_size(); break;
case Metaspace::ROMetaspaceType: requested = SharedReadOnlySize / wordSize; break;
case Metaspace::ReadWriteMetaspaceType: requested = SharedReadWriteSize / wordSize; break;
case Metaspace::AnonymousMetaspaceType: requested = SpecializedChunk; break;
case Metaspace::ReflectionMetaspaceType: requested = SpecializedChunk; break;
default: requested = SmallChunk; break;
@ -2651,15 +2623,6 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
// Is there space in the current chunk?
MetaWord* result = NULL;
// For DumpSharedSpaces, only allocate out of the current chunk which is
// never null because we gave it the size we wanted. Caller reports out
// of memory if this returns null.
if (DumpSharedSpaces) {
assert(current_chunk() != NULL, "should never happen");
inc_used_metrics(word_size);
return current_chunk()->allocate(word_size); // caller handles null result
}
if (current_chunk() != NULL) {
result = current_chunk()->allocate(word_size);
}
@ -3113,6 +3076,7 @@ ChunkManager* Metaspace::_chunk_manager_class = NULL;
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
// Figure out the narrow_klass_base and the narrow_klass_shift. The
// narrow_klass_base is the lower of the metaspace base and the cds base
// (if cds is enabled). The narrow_klass_shift depends on the distance
@ -3121,7 +3085,7 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address
address higher_address;
#if INCLUDE_CDS
if (UseSharedSpaces) {
higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
(address)(metaspace_base + compressed_class_space_size()));
lower_base = MIN2(metaspace_base, cds_base);
} else
@ -3155,7 +3119,7 @@ bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cd
assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
address lower_base = MIN2((address)metaspace_base, cds_base);
address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
(address)(metaspace_base + compressed_class_space_size()));
return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
@ -3163,6 +3127,7 @@ bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cd
// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
assert(using_class_space(), "called improperly");
assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
@ -3358,116 +3323,73 @@ void Metaspace::ergo_initialize() {
void Metaspace::global_initialize() {
MetaspaceGC::initialize();
// Initialize the alignment for shared spaces.
int max_alignment = os::vm_allocation_granularity();
size_t cds_total = 0;
MetaspaceShared::set_max_alignment(max_alignment);
#if INCLUDE_CDS
if (DumpSharedSpaces) {
#if INCLUDE_CDS
MetaspaceShared::estimate_regions_size();
SharedReadOnlySize = align_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_up(SharedMiscDataSize, max_alignment);
SharedMiscCodeSize = align_up(SharedMiscCodeSize, max_alignment);
// Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
cds_total = FileMapInfo::shared_spaces_size();
cds_total = align_up(cds_total, _reserve_alignment);
_space_list = new VirtualSpaceList(cds_total/wordSize);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
if (!_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Unable to dump shared archive.", NULL);
}
MetaspaceShared::initialize_shared_rs();
} else if (UseSharedSpaces) {
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
address cds_address = NULL;
FileMapInfo* mapinfo = new FileMapInfo();
// Open the shared archive file, read and validate the header. If
// initialization fails, shared spaces [UseSharedSpaces] are
// disabled and the file is closed.
// Map in spaces now also
if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
size_t cds_total = MetaspaceShared::core_spaces_size();
cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
vm_exit_during_initialization("Unable to dump shared archive.",
err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
"klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
}
// Set the compressed klass pointer base so that decoding of these pointers works
// properly when creating the shared archive.
assert(UseCompressedOops && UseCompressedClassPointers,
"UseCompressedOops and UseCompressedClassPointers must be set");
Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,
p2i(_space_list->current_virtual_space()->bottom()));
Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
} else {
#if INCLUDE_CDS
if (UseSharedSpaces) {
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
address cds_address = NULL;
FileMapInfo* mapinfo = new FileMapInfo();
// Open the shared archive file, read and validate the header. If
// initialization fails, shared spaces [UseSharedSpaces] are
// disabled and the file is closed.
// Map in spaces now also
if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
cds_total = FileMapInfo::shared_spaces_size();
cds_address = (address)mapinfo->header()->region_addr(0);
#ifdef _LP64
if (using_class_space()) {
char* cds_end = (char*)(cds_address + cds_total);
cds_end = align_up(cds_end, _reserve_alignment);
// If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
// Map the shared string space after compressed pointers
// because it relies on compressed class pointers setting to work
mapinfo->map_string_regions();
}
#endif // _LP64
} else {
assert(!mapinfo->is_open() && !UseSharedSpaces,
"archive file not closed or shared spaces not disabled.");
if (using_class_space()) {
char* cds_end = (char*)(cds_address + cds_total);
cds_end = (char *)align_up(cds_end, _reserve_alignment);
// If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
// Map the shared string space after compressed pointers
// because it relies on compressed class pointers setting to work
mapinfo->map_string_regions();
}
#endif // _LP64
} else {
assert(!mapinfo->is_open() && !UseSharedSpaces,
"archive file not closed or shared spaces not disabled.");
}
}
#endif // INCLUDE_CDS
#ifdef _LP64
if (!UseSharedSpaces && using_class_space()) {
if (!UseSharedSpaces && using_class_space()) {
if (DumpSharedSpaces) {
// Already initialized inside MetaspaceShared::initialize_shared_rs()
} else {
char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(base, 0);
}
}
#endif // _LP64
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
_first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
// Make the first class chunk bigger than a medium chunk so it's not put
// on the medium chunk list. The next chunk will be small and progress
// from there. This size calculated by -version.
_first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
(CompressedClassSpaceSize/BytesPerWord)*2);
_first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
// Arbitrarily set the initial virtual space to a multiple
// of the boot class loader size.
size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
word_size = align_up(word_size, Metaspace::reserve_alignment_words());
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
_first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
// Make the first class chunk bigger than a medium chunk so it's not put
// on the medium chunk list. The next chunk will be small and progress
// from there. This size calculated by -version.
_first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
(CompressedClassSpaceSize/BytesPerWord)*2);
_first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
// Arbitrarily set the initial virtual space to a multiple
// of the boot class loader size.
size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
word_size = align_up(word_size, Metaspace::reserve_alignment_words());
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
if (!_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
}
if (!_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
}
_tracer = new MetaspaceTracer();
@ -3496,11 +3418,6 @@ Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType
get_space_manager(mdtype)->medium_chunk_bunch());
}
// For dumping shared archive, report error if allocation has failed.
if (DumpSharedSpaces && chunk == NULL) {
report_insufficient_metaspace(MetaspaceAux::committed_bytes() + chunk_word_size * BytesPerWord);
}
return chunk;
}
@ -3534,9 +3451,6 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
if (using_class_space()) {
initialize_first_chunk(type, ClassType);
}
_alloc_record_head = NULL;
_alloc_record_tail = NULL;
}
size_t Metaspace::align_word_size_up(size_t word_size) {
@ -3545,8 +3459,8 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
}
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
// Also, don't use class_vsm() unless UseCompressedClassPointers is true.
assert(!_frozen, "sanity");
// Don't use class_vsm() unless UseCompressedClassPointers is true.
if (is_class_space_allocation(mdtype)) {
return class_vsm()->allocate(word_size);
} else {
@ -3555,6 +3469,7 @@ MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
}
MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
assert(!_frozen, "sanity");
size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
assert(delta_bytes > 0, "Must be");
@ -3580,13 +3495,6 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype)
return res;
}
// Space allocated in the Metaspace. This may
// be across several metadata virtual spaces.
char* Metaspace::bottom() const {
assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
return (char*)vsm()->current_chunk()->bottom();
}
size_t Metaspace::used_words_slow(MetadataType mdtype) const {
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
@ -3596,6 +3504,7 @@ size_t Metaspace::used_words_slow(MetadataType mdtype) const {
}
size_t Metaspace::free_words_slow(MetadataType mdtype) const {
assert(!_frozen, "sanity");
if (mdtype == ClassType) {
return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
} else {
@ -3635,13 +3544,10 @@ size_t Metaspace::allocated_chunks_bytes() const {
}
void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
assert(!_frozen, "sanity");
assert(!SafepointSynchronize::is_at_safepoint()
|| Thread::current()->is_VM_thread(), "should be the VM thread");
if (DumpSharedSpaces && log_is_enabled(Info, cds)) {
record_deallocation(ptr, vsm()->get_allocation_word_size(word_size));
}
MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
if (is_class && using_class_space()) {
@ -3651,9 +3557,9 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
}
}
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS) {
MetaspaceObj::Type type, TRAPS) {
assert(!_frozen, "sanity");
if (HAS_PENDING_EXCEPTION) {
assert(false, "Should not allocate with exception pending");
return NULL; // caller does a CHECK_NULL too
@ -3662,26 +3568,6 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
"ClassLoaderData::the_null_class_loader_data() should have been used.");
// Allocate in metaspaces without taking out a lock, because it deadlocks
// with the SymbolTable_lock. Dumping is single threaded for now. We'll have
// to revisit this for application class data sharing.
if (DumpSharedSpaces) {
assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
MetaWord* result = space->allocate(word_size, NonClassType);
if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
}
if (log_is_enabled(Info, cds)) {
space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size));
}
// Zero initialize.
Copy::fill_to_words((HeapWord*)result, word_size, 0);
return result;
}
MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
// Try to allocate metadata.
@ -3788,78 +3674,6 @@ const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
}
}
void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
assert(DumpSharedSpaces, "sanity");
int byte_size = (int)word_size * wordSize;
AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
if (_alloc_record_head == NULL) {
_alloc_record_head = _alloc_record_tail = rec;
} else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
_alloc_record_tail->_next = rec;
_alloc_record_tail = rec;
} else {
// slow linear search, but this doesn't happen that often, and only when dumping
for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
if (old->_ptr == ptr) {
assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
int remain_bytes = old->_byte_size - byte_size;
assert(remain_bytes >= 0, "sanity");
old->_type = type;
if (remain_bytes == 0) {
delete(rec);
} else {
address remain_ptr = address(ptr) + byte_size;
rec->_ptr = remain_ptr;
rec->_byte_size = remain_bytes;
rec->_type = MetaspaceObj::DeallocatedType;
rec->_next = old->_next;
old->_byte_size = byte_size;
old->_next = rec;
}
return;
}
}
assert(0, "reallocating a freed pointer that was not recorded");
}
}
void Metaspace::record_deallocation(void* ptr, size_t word_size) {
assert(DumpSharedSpaces, "sanity");
for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
if (rec->_ptr == ptr) {
assert(rec->_byte_size == (int)word_size * wordSize, "sanity");
rec->_type = MetaspaceObj::DeallocatedType;
return;
}
}
assert(0, "deallocating a pointer that was not recorded");
}
void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
address last_addr = (address)bottom();
for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
address ptr = rec->_ptr;
if (last_addr < ptr) {
closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
}
closure->doit(ptr, rec->_type, rec->_byte_size);
last_addr = ptr + rec->_byte_size;
}
address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
if (last_addr < top) {
closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
}
}
void Metaspace::purge(MetadataType mdtype) {
get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

View File

@ -35,8 +35,6 @@
// Metaspaces are Arenas for the VM's metadata.
// They are allocated one per class loader object, and one for the null
// bootstrap class loader
// Eventually for bootstrap loader we'll have a read-only section and read-write
// to write for DumpSharedSpaces and read for UseSharedSpaces
//
// block X ---+ +-------------------+
// | | Virtualspace |
@ -87,6 +85,7 @@ class Metaspace : public CHeapObj<mtClass> {
friend class VM_CollectForMetadataAllocation;
friend class MetaspaceGC;
friend class MetaspaceAux;
friend class MetaspaceShared;
friend class CollectorPolicy;
public:
@ -98,8 +97,6 @@ class Metaspace : public CHeapObj<mtClass> {
enum MetaspaceType {
StandardMetaspaceType,
BootMetaspaceType,
ROMetaspaceType,
ReadWriteMetaspaceType,
AnonymousMetaspaceType,
ReflectionMetaspaceType
};
@ -134,6 +131,7 @@ class Metaspace : public CHeapObj<mtClass> {
static size_t _commit_alignment;
static size_t _reserve_alignment;
DEBUG_ONLY(static bool _frozen;)
SpaceManager* _vsm;
SpaceManager* vsm() const { return _vsm; }
@ -177,12 +175,11 @@ class Metaspace : public CHeapObj<mtClass> {
}
static const MetaspaceTracer* tracer() { return _tracer; }
static void freeze() {
assert(DumpSharedSpaces, "sanity");
DEBUG_ONLY(_frozen = true;)
}
private:
// These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
void record_deallocation(void* ptr, size_t word_size);
#ifdef _LP64
static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
@ -194,20 +191,6 @@ class Metaspace : public CHeapObj<mtClass> {
static void initialize_class_space(ReservedSpace rs);
#endif
class AllocRecord : public CHeapObj<mtClass> {
public:
AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
: _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
AllocRecord *_next;
address _ptr;
MetaspaceObj::Type _type;
int _byte_size;
};
AllocRecord * _alloc_record_head;
AllocRecord * _alloc_record_tail;
size_t class_chunk_size(size_t word_size);
public:
@ -227,7 +210,6 @@ class Metaspace : public CHeapObj<mtClass> {
static size_t commit_alignment() { return _commit_alignment; }
static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; }
char* bottom() const;
size_t used_words_slow(MetadataType mdtype) const;
size_t free_words_slow(MetadataType mdtype) const;
size_t capacity_words_slow(MetadataType mdtype) const;
@ -239,7 +221,7 @@ class Metaspace : public CHeapObj<mtClass> {
size_t allocated_chunks_bytes() const;
static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS);
MetaspaceObj::Type type, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
static bool contains(const void* ptr);
@ -262,16 +244,9 @@ class Metaspace : public CHeapObj<mtClass> {
static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0) NOT_LP64({});
class AllocRecordClosure : public StackObj {
public:
virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
};
void iterate(AllocRecordClosure *closure);
// Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
// Return TRUE only if UseCompressedClassPointers is True.
static bool using_class_space() {
return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers);
}
static bool is_class_space_allocation(MetadataType mdType) {

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/metaspaceClosure.hpp"
// Redirect the reference slot (*mpp()) to point at new_loc, preserving any
// flag bits stored in the low bits of the original pointer (see CPSlot in
// constantPool.hpp for why such bits may be present).
void MetaspaceClosure::Ref::update(address new_loc) const {
  log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                 p2i(mpp()), p2i(obj()), p2i(new_loc));
  // Carry the old pointer's flag bits over to the new pointer.
  uintx tagged = (uintx)new_loc | (uintx)flag_bits();
  *(address*)mpp() = (address)tagged;
}
// Common implementation behind the templated push() overloads: decide the
// read-only attribute for <ref> from the requested Writability, hand it to
// do_ref(), and, if do_ref() asks for it, iterate the pointers embedded in
// the referenced object. NULL references are ignored.
void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref, Writability w) {
  if (!ref->not_null()) {
    return; // nothing to do for a NULL reference
  }
  bool read_only;
  if (w == _writable) {
    read_only = false;
  } else if (w == _not_writable) {
    read_only = true;
  } else {
    assert(w == _default, "must be");
    // Fall back to the per-type default (e.g., Arrays are read-only by default).
    read_only = ref->is_read_only_by_default();
  }
  // do_ref() returning true means we want to iterate the embedded pointers in <ref>.
  if (do_ref(ref, read_only)) {
    ref->metaspace_pointers_do(this);
  }
}
// Visit ref->obj() exactly once: on first discovery, record it in the
// visited table, notify the subclass via do_unique_ref(), and return true so
// the embedded pointers get iterated. On a repeat visit, return false.
bool UniqueMetaspaceClosure::do_ref(MetaspaceClosure::Ref* ref, bool read_only) {
  bool* prev = _has_been_visited.get(ref->obj());
  if (prev == NULL) {
    // First time we see this object: remember it and descend into it.
    bool added = _has_been_visited.put(ref->obj(), read_only);
    assert(added, "sanity");
    do_unique_ref(ref, read_only);
    return true;  // Saw this for the first time: iterate the embedded pointers.
  }
  // Already visited: the read-only attribute must be stable across visits.
  assert(*prev == read_only, "must be");
  return false;   // No need to iterate embedded pointers again.
}

View File

@ -0,0 +1,281 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METASPACE_ITERATOR_HPP
#define SHARE_VM_MEMORY_METASPACE_ITERATOR_HPP
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/resourceHash.hpp"
// The metadata hierarchy is separate from the oop hierarchy
class MetaspaceObj; // no C++ vtable
//class Array; // no C++ vtable
class Annotations; // no C++ vtable
class ConstantPoolCache; // no C++ vtable
class ConstMethod; // no C++ vtable
class MethodCounters; // no C++ vtable
class Symbol; // no C++ vtable
class Metadata; // has C++ vtable (so do all subclasses)
class ConstantPool;
class MethodData;
class Method;
class Klass;
class InstanceKlass;
class InstanceMirrorKlass;
class InstanceClassLoaderKlass;
class InstanceRefKlass;
class ArrayKlass;
class ObjArrayKlass;
class TypeArrayKlass;
// class MetaspaceClosure --
//
// This class is used for iterating the objects in the HotSpot Metaspaces. It
// provides an API to walk all the reachable objects starting from a set of
// root references (such as all Klass'es in the SystemDictionary).
//
// Currently it is used for compacting the CDS archive by eliminating temporary
// objects allocated during archive creation time. See ArchiveCompactor in
// metaspaceShared.cpp for an example.
//
// To support MetaspaceClosure, each subclass of MetaspaceObj must provide
// a method of the type void metaspace_pointers_do(MetaspaceClosure*). This method
// should call MetaspaceClosure::push() on every pointer field of this
// class that points to a MetaspaceObj. See Annotations::metaspace_pointers_do()
// for an example.
class MetaspaceClosure {
public:
  // Requested writability of a pushed reference. _default defers to the
  // referenced type's is_read_only_by_default().
  enum Writability {
    _writable,
    _not_writable,
    _default
  };

  // class MetaspaceClosure::Ref --
  //
  // MetaspaceClosure can be viewed as a very simple type of copying garbage
  // collector. For it to function properly, it requires each subclass of
  // MetaspaceObj to provide two methods:
  //
  //   size_t size();     -- to determine how much data to copy
  //   void metaspace_pointers_do(MetaspaceClosure*); -- to locate all the embedded pointers
  //
  // Calling these methods would be trivial if these two were virtual methods.
  // However, to save space, MetaspaceObj has NO vtable. The vtable is introduced
  // only in the Metadata class.
  //
  // To work around the lack of a vtable, we use Ref class with templates
  // (see ObjectRef, PrimitiveArrayRef and PointerArrayRef)
  // so that we can statically discover the type of an object. The use of Ref
  // depends on the fact that:
  //
  // [1] We don't use polymorphic pointers for MetaspaceObj's that are not subclasses
  //     of Metadata. I.e., we don't do this:
  //     class Klass {
  //         MetaspaceObj *_obj;
  //         Array<int>* foo() { return (Array<int>*)_obj; }
  //         Symbol*     bar() { return (Symbol*)    _obj; }
  //
  // [2] All Array<T> dimensions are statically declared.
  //
  // A Ref wraps the ADDRESS of a pointer field (mpp = "metaspace pointer
  // pointer"), so update() can redirect the field itself.
  class Ref {
  protected:
    // Address of the pointer field being visited.
    virtual void** mpp() const = 0;
  public:
    virtual bool not_null() const = 0;
    // Size of the referenced object, in words (matches MetaspaceObj::size()).
    virtual int size() const = 0;
    // Iterate the pointers embedded in the referenced object ...
    virtual void metaspace_pointers_do(MetaspaceClosure *it) const = 0;
    // ... or in its relocated copy at new_loc.
    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const = 0;
    // The MetaspaceObj::Type of the referenced object.
    virtual MetaspaceObj::Type msotype() const = 0;
    virtual bool is_read_only_by_default() const = 0;

    address obj() const {
      // In some rare cases (see CPSlot in constantPool.hpp) we store some flags in the lowest
      // 2 bits of a MetaspaceObj pointer. Unmask these when manipulating the pointer.
      uintx p = (uintx)*mpp();
      return (address)(p & (~FLAG_MASK));
    }

    // Redirect the wrapped pointer field to new_loc (flag bits preserved;
    // implemented in metaspaceClosure.cpp).
    void update(address new_loc) const;

  private:
    static const uintx FLAG_MASK = 0x03;

    // The low flag bits of the raw (unmasked) pointer.
    int flag_bits() const {
      uintx p = (uintx)*mpp();
      return (int)(p & FLAG_MASK);
    }
  };

private:
  // -------------------------------------------------- ObjectRef
  // Ref for a pointer to a single MetaspaceObj of statically-known type T.
  template <class T> class ObjectRef : public Ref {
    T** _mpp;
    T* dereference() const {
      return *_mpp;
    }
  protected:
    virtual void** mpp() const {
      return (void**)_mpp;
    }

  public:
    ObjectRef(T** mpp) : _mpp(mpp) {}

    virtual bool is_read_only_by_default() const { return T::is_read_only_by_default(); }
    virtual bool not_null()                const { return dereference() != NULL; }
    virtual int size()                     const { return dereference()->size(); }
    virtual MetaspaceObj::Type msotype()   const { return dereference()->type(); }

    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
      dereference()->metaspace_pointers_do(it);
    }
    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
      ((T*)new_loc)->metaspace_pointers_do(it);
    }
  };

  // -------------------------------------------------- PrimitiveArrayRef
  // Ref for an Array<T> whose elements are NOT pointers: there are no
  // embedded metaspace pointers to push, so the do-methods only log.
  template <class T> class PrimitiveArrayRef : public Ref {
    Array<T>** _mpp;
    Array<T>* dereference() const {
      return *_mpp;
    }
  protected:
    virtual void** mpp() const {
      return (void**)_mpp;
    }

  public:
    PrimitiveArrayRef(Array<T>** mpp) : _mpp(mpp) {}

    // all Arrays are read-only by default
    virtual bool is_read_only_by_default() const { return true; }
    virtual bool not_null()                const { return dereference() != NULL; }
    virtual int size()                     const { return dereference()->size(); }
    virtual MetaspaceObj::Type msotype()   const { return MetaspaceObj::array_type(sizeof(T)); }

    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
      Array<T>* array = dereference();
      log_trace(cds)("Iter(PrimitiveArray): %p [%d]", array, array->length());
    }
    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
      Array<T>* array = (Array<T>*)new_loc;
      log_trace(cds)("Iter(PrimitiveArray): %p [%d]", array, array->length());
    }
  };

  // -------------------------------------------------- PointerArrayRef
  // Ref for an Array<T*>: every element is itself a metaspace pointer, so
  // each element slot is pushed back into the closure.
  template <class T> class PointerArrayRef : public Ref {
    Array<T*>** _mpp;
    Array<T*>* dereference() const {
      return *_mpp;
    }
  protected:
    virtual void** mpp() const {
      return (void**)_mpp;
    }

  public:
    PointerArrayRef(Array<T*>** mpp) : _mpp(mpp) {}

    // all Arrays are read-only by default
    virtual bool is_read_only_by_default() const { return true; }
    virtual bool not_null()                const { return dereference() != NULL; }
    virtual int size()                     const { return dereference()->size(); }
    virtual MetaspaceObj::Type msotype()   const { return MetaspaceObj::array_type(sizeof(T*)); }

    virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
      metaspace_pointers_do_at_impl(it, dereference());
    }
    virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
      metaspace_pointers_do_at_impl(it, (Array<T*>*)new_loc);
    }
  private:
    // Push the address of each element slot, so the elements themselves can
    // be visited (and their slots updated).
    void metaspace_pointers_do_at_impl(MetaspaceClosure *it, Array<T*>* array) const {
      log_trace(cds)("Iter(ObjectArray): %p [%d]", array, array->length());
      for (int i = 0; i < array->length(); i++) {
        T** mpp = array->adr_at(i);
        it->push(mpp);
      }
    }
  };

  // Shared implementation for the push() overloads below.
  void push_impl(Ref* ref, Writability w);

public:
  // returns true if we want to keep iterating the pointers embedded inside <ref>
  virtual bool do_ref(Ref* ref, bool read_only) = 0;

  // When you do:
  //     void MyType::metaspace_pointers_do(MetaspaceClosure* it) {
  //       it->push(_my_field)
  //
  // C++ will try to match the "most specific" template function. This one
  // will be matched if possible (if mpp is an Array<> of any pointer type).
  template <typename T> void push(Array<T*>** mpp, Writability w = _default) {
    PointerArrayRef<T> ref(mpp);
    push_impl(&ref, w);
  }

  // If the above function doesn't match (mpp is an Array<>, but T is not a pointer type), then
  // this is the second choice.
  template <typename T> void push(Array<T>** mpp, Writability w = _default) {
    PrimitiveArrayRef<T> ref(mpp);
    push_impl(&ref, w);
  }

  // If the above function doesn't match (mpp is not an Array<> type), then
  // this will be matched by default.
  template <class T> void push(T** mpp, Writability w = _default) {
    ObjectRef<T> ref(mpp);
    push_impl(&ref, w);
  }
};
// This is a special MetaspaceClosure that visits each unique MetaspaceObj once.
// Deduplication is done on ref->obj() addresses via a fixed-size resource
// hashtable; subclasses implement do_unique_ref() instead of do_ref().
class UniqueMetaspaceClosure : public MetaspaceClosure {
  // Do not override. Returns true if we are discovering ref->obj() for the first time.
  virtual bool do_ref(Ref* ref, bool read_only);

public:
  // Gets called the first time we discover an object.
  virtual void do_unique_ref(Ref* ref, bool read_only) = 0;

private:
  static unsigned my_hash(const address& a) {
    return primitive_hash<address>(a);
  }
  static bool my_equals(const address& a0, const address& a1) {
    return primitive_equals<address>(a0, a1);
  }
  // Maps visited object address -> the read_only attribute it was first seen with.
  ResourceHashtable<
      address, bool,
      UniqueMetaspaceClosure::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
      UniqueMetaspaceClosure::my_equals, // solaris compiler doesn't like: primitive_equals<address>
      16384> _has_been_visited;
};
#endif // SHARE_VM_MEMORY_METASPACE_ITERATOR_HPP

File diff suppressed because it is too large Load Diff

View File

@ -32,45 +32,7 @@
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#define DEFAULT_SHARED_READ_WRITE_SIZE (NOT_LP64(6*M) LP64_ONLY(10*M))
#define MIN_SHARED_READ_WRITE_SIZE (NOT_LP64(6*M) LP64_ONLY(10*M))
#define DEFAULT_SHARED_READ_ONLY_SIZE (NOT_LP64(8*M) LP64_ONLY(13*M))
#define MIN_SHARED_READ_ONLY_SIZE (NOT_LP64(8*M) LP64_ONLY(13*M))
// the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on
// the sizes required for dumping the archive using the default classlist. The sizes
// are multiplied by 1.5 for a safety margin.
#define DEFAULT_SHARED_MISC_DATA_SIZE (NOT_LP64(2*M) LP64_ONLY(4*M))
#define MIN_SHARED_MISC_DATA_SIZE (NOT_LP64(1*M) LP64_ONLY(1200*K))
#define DEFAULT_SHARED_MISC_CODE_SIZE (120*K)
#define MIN_SHARED_MISC_CODE_SIZE (NOT_LP64(63*K) LP64_ONLY(69*K))
#define DEFAULT_COMBINED_SIZE (DEFAULT_SHARED_READ_WRITE_SIZE+DEFAULT_SHARED_READ_ONLY_SIZE+DEFAULT_SHARED_MISC_DATA_SIZE+DEFAULT_SHARED_MISC_CODE_SIZE)
// the max size is the MAX size (ie. 0x7FFFFFFF) - the total size of
// the other 3 sections - page size (to avoid overflow in case the final
// size will get aligned up on page size)
#define SHARED_PAGE ((size_t)os::vm_page_size())
#define MAX_SHARED_DELTA (0x7FFFFFFF)
#define MAX_SHARED_READ_WRITE_SIZE (MAX_SHARED_DELTA-(MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_DATA_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
#define MAX_SHARED_READ_ONLY_SIZE (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_MISC_DATA_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
#define MAX_SHARED_MISC_DATA_SIZE (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_CODE_SIZE)-SHARED_PAGE)
#define MAX_SHARED_MISC_CODE_SIZE (MAX_SHARED_DELTA-(MIN_SHARED_READ_WRITE_SIZE+MIN_SHARED_READ_ONLY_SIZE+MIN_SHARED_MISC_DATA_SIZE)-SHARED_PAGE)
#define LargeSharedArchiveSize (300*M)
#define HugeSharedArchiveSize (800*M)
#define ReadOnlyRegionPercentage 0.52
#define ReadWriteRegionPercentage 0.43
#define MiscDataRegionPercentage 0.03
#define MiscCodeRegionPercentage 0.02
#define LargeThresholdClassCount 5000
#define HugeThresholdClassCount 40000
#define SET_ESTIMATED_SIZE(type, region) \
Shared ##region## Size = FLAG_IS_DEFAULT(Shared ##region## Size) ? \
(uintx)(type ## SharedArchiveSize * region ## RegionPercentage) : Shared ## region ## Size
class FileMapInfo;
@ -83,31 +45,12 @@ public:
CompactHashtableStats string;
};
class SharedMiscRegion VALUE_OBJ_CLASS_SPEC {
private:
VirtualSpace _vs;
char* _alloc_top;
SharedSpaceType _space_type;
public:
void initialize(ReservedSpace rs, size_t committed_byte_size, SharedSpaceType space_type);
VirtualSpace* virtual_space() {
return &_vs;
}
char* low() const {
return _vs.low();
}
char* alloc_top() const {
return _alloc_top;
}
char* alloc(size_t num_bytes) NOT_CDS_RETURN_(NULL);
};
// Class Data Sharing Support
class MetaspaceShared : AllStatic {
// CDS support
static ReservedSpace* _shared_rs;
static ReservedSpace _shared_rs;
static VirtualSpace _shared_vs;
static int _max_alignment;
static MetaspaceSharedStats _stats;
static bool _has_error_classes;
@ -115,49 +58,46 @@ class MetaspaceShared : AllStatic {
static bool _remapped_readwrite;
static address _cds_i2i_entry_code_buffers;
static size_t _cds_i2i_entry_code_buffers_size;
// Used only during dumping.
static SharedMiscRegion _md;
static SharedMiscRegion _mc;
static SharedMiscRegion _od;
static size_t _core_spaces_size;
public:
enum {
ro = 0, // read-only shared space in the heap
mc = 0, // miscellaneous code for method trampolines
rw = 1, // read-write shared space in the heap
md = 2, // miscellaneous data for initializing tables, etc.
mc = 3, // miscellaneous code - vtable replacement.
ro = 2, // read-only shared space in the heap
md = 3, // miscellaneous data for initializing tables, etc.
max_strings = 2, // max number of string regions in string space
num_non_strings = 4, // number of non-string regions
first_string = num_non_strings, // index of first string region
// The optional data region is the last region.
// Currently it only contains class file data.
od = max_strings + num_non_strings,
last_valid_region = od,
n_regions = od + 1 // total number of regions
};
// Accessor functions to save shared space created for metadata, which has
// extra space allocated at the end for miscellaneous data and code.
static void set_max_alignment(int alignment) {
CDS_ONLY(_max_alignment = alignment);
}
static int max_alignment() {
CDS_ONLY(return _max_alignment);
NOT_CDS(return 0);
}
static void prepare_for_dumping() NOT_CDS_RETURN;
static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
static int preload_and_dump(const char * class_list_path,
GrowableArray<Klass*>* class_promote_order,
TRAPS) NOT_CDS_RETURN_(0);
static int preload_classes(const char * class_list_path,
TRAPS) NOT_CDS_RETURN_(0);
static ReservedSpace* shared_rs() {
CDS_ONLY(return _shared_rs);
CDS_ONLY(return &_shared_rs);
NOT_CDS(return NULL);
}
static void commit_shared_space_to(char* newtop) NOT_CDS_RETURN;
static size_t core_spaces_size() {
return _core_spaces_size;
}
static void initialize_shared_rs() NOT_CDS_RETURN;
static void initialize_shared_rs(ReservedSpace* rs) NOT_CDS_RETURN;
// Delta of this object from the bottom of the archive.
static uintx object_delta(void* obj) {
assert(DumpSharedSpaces, "supported only for dumping");
assert(shared_rs()->contains(obj), "must be");
address base_address = address(shared_rs()->base());
uintx delta = address(obj) - base_address;
return delta;
}
static void set_archive_loading_failed() {
_archive_loading_failed = true;
@ -171,25 +111,26 @@ class MetaspaceShared : AllStatic {
// Return true if given address is in the shared region corresponding to the idx
static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
static bool is_string_region(int idx) {
CDS_ONLY(return (idx >= first_string && idx < first_string + max_strings));
NOT_CDS(return false);
}
static bool is_in_trampoline_frame(address addr) NOT_CDS_RETURN_(false);
static intptr_t* allocate_cpp_vtable_clones(intptr_t* top, intptr_t* end);
static void allocate_cpp_vtable_clones();
static intptr_t* clone_cpp_vtables(intptr_t* p);
static void zero_cpp_vtable_clones_for_writing();
static void patch_cpp_vtable_pointers();
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
static void serialize(SerializeClosure* sc);
static void serialize(SerializeClosure* sc, GrowableArray<MemRegion> *string_space,
size_t* space_size);
static MetaspaceSharedStats* stats() {
return &_stats;
}
static void report_out_of_space(const char* name, size_t needed_bytes);
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private if
// sharing is enabled. Simply returns true if sharing is not enabled
@ -206,13 +147,21 @@ class MetaspaceShared : AllStatic {
static void link_and_cleanup_shared_classes(TRAPS);
static void check_shared_class_loader_type(Klass* obj);
static int count_class(const char* classlist_file);
static void estimate_regions_size() NOT_CDS_RETURN;
// Allocate a block of memory from the "mc", "ro", or "rw" regions.
static char* misc_code_space_alloc(size_t num_bytes);
static char* read_only_space_alloc(size_t num_bytes);
// Allocate a block of memory from the "mc", "md", or "od" regions.
static char* misc_code_space_alloc(size_t num_bytes) { return _mc.alloc(num_bytes); }
static char* misc_data_space_alloc(size_t num_bytes) { return _md.alloc(num_bytes); }
static char* optional_data_space_alloc(size_t num_bytes) { return _od.alloc(num_bytes); }
template <typename T>
static Array<T>* new_ro_array(int length) {
#if INCLUDE_CDS
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
Array<T>* array = (Array<T>*)read_only_space_alloc(byte_size);
array->initialize(length);
return array;
#else
return NULL;
#endif
}
static address cds_i2i_entry_code_buffers(size_t total_size);
@ -222,18 +171,6 @@ class MetaspaceShared : AllStatic {
static size_t cds_i2i_entry_code_buffers_size() {
return _cds_i2i_entry_code_buffers_size;
}
static SharedMiscRegion* misc_code_region() {
assert(DumpSharedSpaces, "used during dumping only");
return &_mc;
}
static SharedMiscRegion* misc_data_region() {
assert(DumpSharedSpaces, "used during dumping only");
return &_md;
}
static SharedMiscRegion* optional_data_region() {
assert(DumpSharedSpaces, "used during dumping only");
return &_od;
}
static void relocate_klass_ptr(oop o);
};
#endif // SHARE_VM_MEMORY_METASPACESHARED_HPP

View File

@ -44,6 +44,7 @@
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -223,6 +224,37 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}
void LatestMethodCache::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_klass);
}
void Universe::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_boolArrayKlassObj);
it->push(&_byteArrayKlassObj);
it->push(&_charArrayKlassObj);
it->push(&_intArrayKlassObj);
it->push(&_shortArrayKlassObj);
it->push(&_longArrayKlassObj);
it->push(&_singleArrayKlassObj);
it->push(&_doubleArrayKlassObj);
for (int i = 0; i < T_VOID+1; i++) {
it->push(&_typeArrayKlassObjs[i]);
}
it->push(&_objectArrayKlassObj);
it->push(&_the_empty_int_array);
it->push(&_the_empty_short_array);
it->push(&_the_empty_klass_array);
it->push(&_the_empty_method_array);
it->push(&_the_array_interfaces_array);
_finalizer_register_cache->metaspace_pointers_do(it);
_loader_addClass_cache->metaspace_pointers_do(it);
_pd_implies_cache->metaspace_pointers_do(it);
_throw_illegal_access_error_cache->metaspace_pointers_do(it);
_do_stack_walk_cache->metaspace_pointers_do(it);
}
// Serialize metadata in and out of CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

View File

@ -67,6 +67,7 @@ class LatestMethodCache : public CHeapObj<mtClass> {
void serialize(SerializeClosure* f) {
f->do_ptr((void**)&_klass);
}
void metaspace_pointers_do(MetaspaceClosure* it);
};
@ -102,6 +103,7 @@ class Universe: AllStatic {
friend class VMStructs;
friend class VM_PopulateDumpSharedSpace;
friend class Metaspace;
friend class MetaspaceShared;
friend jint universe_init();
friend void universe2_init();
@ -474,6 +476,7 @@ class Universe: AllStatic {
// Apply "f" to all klasses for basic types (classes not present in
// SystemDictionary).
static void basic_type_classes_do(void f(Klass*));
static void metaspace_pointers_do(MetaspaceClosure* it);
// Debugging
enum VERIFY_FLAGS {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,6 +63,7 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
// Accessors
char* base() const { return _base; }
size_t size() const { return _size; }
char* end() const { return _base + _size; }
size_t alignment() const { return _alignment; }
bool special() const { return _special; }
bool executable() const { return _executable; }
@ -85,6 +86,9 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
static size_t page_align_size_down(size_t size);
static size_t allocation_align_size_up(size_t size);
static size_t allocation_align_size_down(size_t size);
// Returns true iff 'p' lies inside the half-open interval
// [base(), base() + size()).
bool contains(const void* p) const {
  const char* addr = (const char*)p;
  return addr >= base() && addr < (base() + size());
}
};
ReservedSpace

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,10 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "logging/log.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "oops/annotations.hpp"
#include "oops/instanceKlass.hpp"
@ -33,7 +35,7 @@
// Allocate annotations in metadata area
Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
return new (loader_data, size(), MetaspaceObj::AnnotationsType, THREAD) Annotations();
}
// helper
@ -74,6 +76,13 @@ typeArrayOop Annotations::make_java_array(AnnotationArray* annotations, TRAPS) {
}
}
void Annotations::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(Annotations): %p", this);
it->push(&_class_annotations);
it->push(&_fields_annotations);
it->push(&_class_type_annotations);
it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != NULL
}
void Annotations::print_value_on(outputStream* st) const {
st->print("Anotations(" INTPTR_FORMAT ")", p2i(this));

View File

@ -43,6 +43,8 @@ typedef Array<u1> AnnotationArray;
// a type_annotation instance.
class Annotations: public MetaspaceObj {
// If you add a new field that points to any metaspace object, you
// must add this field to Annotations::metaspace_pointers_do().
// Annotations for this class, or null if none.
AnnotationArray* _class_annotations;
@ -63,6 +65,10 @@ class Annotations: public MetaspaceObj {
// Sizing (in words)
static int size() { return sizeof(Annotations) / wordSize; }
// Annotations should be stored in the read-only region of CDS archive.
static bool is_read_only_by_default() { return true; }
#if INCLUDE_SERVICES
void collect_statistics(KlassSizeStats *sz) const;
#endif
@ -87,6 +93,9 @@ class Annotations: public MetaspaceObj {
static typeArrayOop make_java_array(AnnotationArray* annotations, TRAPS);
bool is_klass() const { return false; }
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return AnnotationsType; }
private:
static julong count_bytes(Array<AnnotationArray*>* p);
public:

View File

@ -36,6 +36,7 @@
template <typename T>
class Array: public MetaspaceObj {
friend class MetadataFactory;
friend class MetaspaceShared;
friend class VMStructs;
friend class JVMCIVMStructs;
friend class MethodHandleCompiler; // special case
@ -53,13 +54,16 @@ protected:
Array(const Array<T>&);
void operator=(const Array<T>&);
void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw() {
size_t word_size = Array::size(length);
return (void*) Metaspace::allocate(loader_data, word_size, read_only,
return (void*) Metaspace::allocate(loader_data, word_size,
MetaspaceObj::array_type(sizeof(T)), THREAD);
}
static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
static size_t byte_sizeof(int length, size_t elm_byte_size) {
return sizeof(Array<T>) + MAX2(length - 1, 0) * elm_byte_size;
}
static size_t byte_sizeof(int length) { return byte_sizeof(length, sizeof(T)); }
// WhiteBox API helper.
// Can't distinguish between array of length 0 and length 1,
@ -130,6 +134,9 @@ protected:
return (int)words;
}
// Size in words of an Array with 'length' elements of 'elm_byte_size' bytes
// each (header included), rounded up to a whole number of words.
static int size(int length, int elm_byte_size) {
  return align_size_up(byte_sizeof(length, elm_byte_size), BytesPerWord) / BytesPerWord; // FIXME: unify rounding with size(int)
}
int size() {
return size(_length);

View File

@ -29,6 +29,7 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/arrayKlass.hpp"
@ -173,6 +174,17 @@ jint ArrayKlass::jvmti_class_status() const {
return JVMTI_CLASS_STATUS_ARRAY;
}
void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
Klass::metaspace_pointers_do(it);
ResourceMark rm;
log_trace(cds)("Iter(ArrayKlass): %p (%s)", this, external_name());
// need to cast away volatile
it->push((Klass**)&_higher_dimension);
it->push((Klass**)&_lower_dimension);
}
void ArrayKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
}

View File

@ -36,6 +36,8 @@ class klassVtable;
class ArrayKlass: public Klass {
friend class VMStructs;
private:
// If you add a new field that points to any metaspace object, you
// must add this field to ArrayKlass::metaspace_pointers_do().
int _dimension; // This is n'th-dimensional array.
Klass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present).
Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present).
@ -102,6 +104,8 @@ class ArrayKlass: public Klass {
// Sizing
static int static_size(int header_size);
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
#if INCLUDE_SERVICES
virtual void collect_statistics(KlassSizeStats *sz) const {
Klass::collect_statistics(sz);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
@ -42,7 +43,7 @@ ConstMethod* ConstMethod::allocate(ClassLoaderData* loader_data,
MethodType method_type,
TRAPS) {
int size = ConstMethod::size(byte_code_size, sizes);
return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
return new (loader_data, size, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
byte_code_size, sizes, method_type, size);
}
@ -402,6 +403,25 @@ void ConstMethod::copy_annotations_from(ClassLoaderData* loader_data, ConstMetho
}
}
void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(ConstMethod): %p", this);
it->push(&_constants);
it->push(&_stackmap_data);
if (has_method_annotations()) {
it->push(method_annotations_addr());
}
if (has_parameter_annotations()) {
it->push(parameter_annotations_addr());
}
if (has_type_annotations()) {
it->push(type_annotations_addr());
}
if (has_default_annotations()) {
it->push(default_annotations_addr());
}
}
// Printing
void ConstMethod::print_on(outputStream* st) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -198,6 +198,9 @@ private:
// multiple threads, so is volatile.
volatile uint64_t _fingerprint;
// If you add a new field that points to any metaspace object, you
// must add this field to ConstMethod::metaspace_pointers_do().
ConstantPool* _constants; // Constant pool
// Raw stackmap data for the method
@ -369,6 +372,10 @@ public:
int size() const { return _constMethod_size;}
void set_constMethod_size(int size) { _constMethod_size = size; }
// ConstMethods should be stored in the read-only region of CDS archive.
static bool is_read_only_by_default() { return true; }
#if INCLUDE_SERVICES
void collect_statistics(KlassSizeStats *sz) const;
#endif
@ -529,6 +536,8 @@ public:
bool is_klass() const { return false; }
DEBUG_ONLY(bool on_stack() { return false; })
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return ConstMethodType; }
private:
// Since the size of the compressed line number table is unknown, the
// offsets of the other variable sized sections are computed backwards

View File

@ -32,6 +32,7 @@
#include "interpreter/linkResolver.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.hpp"
@ -48,9 +49,9 @@
#include "utilities/copy.hpp"
ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
Array<u1>* tags = MetadataFactory::new_writeable_array<u1>(loader_data, length, 0, CHECK_NULL);
Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
int size = ConstantPool::size(length);
return new (loader_data, size, true, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
return new (loader_data, size, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
}
#ifdef ASSERT
@ -108,6 +109,26 @@ void ConstantPool::release_C_heap_structures() {
unreference_symbols();
}
// Report all metaspace pointers reachable from this ConstantPool.
void ConstantPool::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(ConstantPool): %p", this);
  // _tags and _resolved_klasses are pushed with the _writable flag:
  // presumably they must stay modifiable at runtime even when the pool
  // itself is archived read-only — confirm against MetaspaceClosure docs.
  it->push(&_tags, MetaspaceClosure::_writable);
  it->push(&_cache);
  it->push(&_pool_holder);
  it->push(&_operands);
  it->push(&_resolved_klasses, MetaspaceClosure::_writable);
  // Walk the pool entries themselves and push the embedded Symbol* slots.
  for (int i = 0; i < length(); i++) {
    // The only MSO's embedded in the CP entries are Symbols:
    //    JVM_CONSTANT_String (normal and pseudo)
    //    JVM_CONSTANT_Utf8
    constantTag ctag = tag_at(i);
    if (ctag.is_string() || ctag.is_utf8()) {
      it->push(symbol_at_addr(i));
    }
  }
}
objArrayOop ConstantPool::resolved_references() const {
return (objArrayOop)JNIHandles::resolve(_cache->resolved_references());
}
@ -154,7 +175,7 @@ void ConstantPool::allocate_resolved_klasses(ClassLoaderData* loader_data, int n
// UnresolvedKlass entries that are temporarily created during class redefinition.
assert(num_klasses < CPKlassSlot::_temp_resolved_klass_index, "sanity");
assert(resolved_klasses() == NULL, "sanity");
Array<Klass*>* rk = MetadataFactory::new_writeable_array<Klass*>(loader_data, num_klasses, CHECK);
Array<Klass*>* rk = MetadataFactory::new_array<Klass*>(loader_data, num_klasses, CHECK);
set_resolved_klasses(rk);
}

View File

@ -99,6 +99,8 @@ class ConstantPool : public Metadata {
friend class BytecodeInterpreter; // Directly extracts a klass in the pool for fast instanceof/checkcast
friend class Universe; // For null constructor
private:
// If you add a new field that points to any metaspace object, you
// must add this field to ConstantPool::metaspace_pointers_do().
Array<u1>* _tags; // the tag array describing the constant pool's contents
ConstantPoolCache* _cache; // the cache holding interpreter runtime information
InstanceKlass* _pool_holder; // the corresponding class
@ -212,6 +214,9 @@ class ConstantPool : public Metadata {
ConstantPoolCache* cache() const { return _cache; }
void set_cache(ConstantPoolCache* cache){ _cache = cache; }
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return ConstantPoolType; }
// Create object cache in the constant pool
void initialize_resolved_references(ClassLoaderData* loader_data,
const intStack& reference_map,
@ -765,6 +770,9 @@ class ConstantPool : public Metadata {
void collect_statistics(KlassSizeStats *sz) const;
#endif
// ConstantPools should be stored in the read-only region of CDS archive.
static bool is_read_only_by_default() { return true; }
friend class ClassFileParser;
friend class SystemDictionary;

View File

@ -26,6 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCache.hpp"
@ -566,7 +567,7 @@ ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
const int length = index_map.length() + invokedynamic_index_map.length();
int size = ConstantPoolCache::size(length);
return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD)
ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
}
@ -652,6 +653,11 @@ void ConstantPoolCache::dump_cache() {
}
#endif // INCLUDE_JVMTI
void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(ConstantPoolCache): %p", this);
it->push(&_constant_pool);
it->push(&_reference_map);
}
// Printing

View File

@ -404,6 +404,8 @@ class ConstantPoolCache: public MetaspaceObj {
friend class VMStructs;
friend class MetadataFactory;
private:
// If you add a new field that points to any metaspace object, you
// must add this field to ConstantPoolCache::metaspace_pointers_do().
int _length;
ConstantPool* _constant_pool; // the corresponding constant pool
@ -443,6 +445,8 @@ class ConstantPoolCache: public MetaspaceObj {
bool is_constantPoolCache() const { return true; }
int length() const { return _length; }
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return ConstantPoolCacheType; }
jobject resolved_references() { return _resolved_references; }
void set_resolved_references(jobject s) { _resolved_references = s; }

View File

@ -46,6 +46,7 @@
#include "memory/heapInspection.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -1998,6 +1999,49 @@ void InstanceKlass::store_fingerprint(uint64_t fingerprint) {
}
}
// Report all metaspace pointers held by this InstanceKlass: the Klass-level
// fields, the per-class metadata arrays, and the Klass*/Method* slots
// embedded in the itable that follows the instance in memory.
void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass::metaspace_pointers_do(it);

  if (log_is_enabled(Trace, cds)) {
    ResourceMark rm; // external_name() allocates in the resource area
    log_trace(cds)("Iter(InstanceKlass): %p (%s)", this, external_name());
  }

  it->push(&_annotations);
  it->push((Klass**)&_array_klasses);
  it->push(&_constants);
  it->push(&_inner_classes);
  it->push(&_array_name);
#if INCLUDE_JVMTI
  it->push(&_previous_versions);
#endif
  it->push(&_methods);
  it->push(&_default_methods);
  it->push(&_local_interfaces);
  it->push(&_transitive_interfaces);
  it->push(&_method_ordering);
  it->push(&_default_vtable_indices);
  it->push(&_fields);

  if (itable_length() > 0) {
    itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
    // The first offset entry points at the start of the method table; the
    // distance from the itable start, divided by the entry size, gives the
    // number of interface (offset) entries.
    int method_table_offset_in_words = ioe->offset()/wordSize;
    int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
                         / itableOffsetEntry::size();
    for (int i = 0; i < nof_interfaces; i ++, ioe ++) {
      if (ioe->interface_klass() != NULL) {
        // Push the interface Klass* slot, then every Method* slot in that
        // interface's method-entry segment.
        it->push(ioe->interface_klass_addr());
        itableMethodEntry* ime = ioe->first_method_entry(this);
        int n = klassItable::method_count_for_interface(ioe->interface_klass());
        for (int index = 0; index < n; index ++) {
          it->push(ime[index].method_addr());
        }
      }
    }
  }
}
void InstanceKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
@ -2018,12 +2062,26 @@ void InstanceKlass::remove_unshareable_info() {
constants()->remove_unshareable_info();
assert(_dep_context == DependencyContext::EMPTY, "dependency context is not shareable");
for (int i = 0; i < methods()->length(); i++) {
Method* m = methods()->at(i);
m->remove_unshareable_info();
}
// These are not allocated from metaspace, but they should should all be empty
// during dump time, so we don't need to worry about them in InstanceKlass::metaspace_pointers_do().
guarantee(_source_debug_extension == NULL, "must be");
guarantee(_oop_map_cache == NULL, "must be");
guarantee(_init_thread == NULL, "must be");
guarantee(_oop_map_cache == NULL, "must be");
guarantee(_jni_ids == NULL, "must be");
guarantee(_methods_jmethod_ids == NULL, "must be");
guarantee(_dep_context == DependencyContext::EMPTY, "must be");
guarantee(_osr_nmethods_head == NULL, "must be");
#if INCLUDE_JVMTI
guarantee(_breakpoints == NULL, "must be");
guarantee(_previous_versions == NULL, "must be");
#endif
}
static void restore_unshareable_in_class(Klass* k, TRAPS) {
@ -3664,11 +3722,15 @@ unsigned char * InstanceKlass::get_cached_class_file_bytes() {
#if INCLUDE_CDS
JvmtiCachedClassFileData* InstanceKlass::get_archived_class_data() {
assert(this->is_shared(), "class should be shared");
if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
if (DumpSharedSpaces) {
return _cached_class_file;
} else {
return NULL;
assert(this->is_shared(), "class should be shared");
if (MetaspaceShared::is_in_shared_space(_cached_class_file)) {
return _cached_class_file;
} else {
return NULL;
}
}
}
#endif

View File

@ -142,6 +142,9 @@ class InstanceKlass: public Klass {
static InstanceKlass* allocate_instance_klass(const ClassFileParser& parser, TRAPS);
protected:
// If you add a new field that points to any metaspace object, you
// must add this field to InstanceKlass::metaspace_pointers_do().
// Annotations for this class
Annotations* _annotations;
// Package this class is defined in
@ -1341,6 +1344,8 @@ public:
// JVMTI support
jint jvmti_class_status() const;
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
public:
// Printing
#ifndef PRODUCT

View File

@ -31,6 +31,8 @@
#include "logging/log.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
@ -162,8 +164,7 @@ Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signatur
}
void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
MetaspaceObj::ClassType, THREAD);
return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD);
}
// "Normal" instantiation is preceeded by a MetaspaceObj allocation
@ -485,6 +486,29 @@ void Klass::oops_do(OopClosure* cl) {
cl->do_oop(&_java_mirror);
}
// Report the metaspace pointers common to every Klass: the name Symbol,
// the super/subclass linkage, and the Method* slot of each vtable entry
// embedded after the Klass in memory.
void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
  if (log_is_enabled(Trace, cds)) {
    ResourceMark rm; // external_name() allocates in the resource area
    log_trace(cds)("Iter(Klass): %p (%s)", this, external_name());
  }

  it->push(&_name);
  it->push(&_secondary_super_cache);
  it->push(&_secondary_supers);
  // All _primary_super_limit slots are pushed, including unused (NULL) ones;
  // the closure is assumed to tolerate NULL slots — confirm with its contract.
  for (int i = 0; i < _primary_super_limit; i++) {
    it->push(&_primary_supers[i]);
  }
  it->push(&_super);
  it->push(&_subklass);
  it->push(&_next_sibling);
  it->push(&_next_link);

  // Push the Method* held by each embedded vtable entry. NOTE(review):
  // this iterates vtable_length() entries — assumes one entry per unit of
  // vtable_length(); verify against vtableEntry::size().
  vtableEntry* vt = start_of_vtable();
  for (int i=0; i<vtable_length(); i++) {
    it->push(vt[i].method_addr());
  }
}
void Klass::remove_unshareable_info() {
assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
TRACE_REMOVE_ID(this);

View File

@ -65,6 +65,9 @@ class Klass : public Metadata {
friend class VMStructs;
friend class JVMCIVMStructs;
protected:
// If you add a new field that points to any metaspace object, you
// must add this field to Klass::metaspace_pointers_do().
// note: put frequently-used fields together at start of klass structure
// for better cache behavior (may not make much of a difference but sure won't hurt)
enum { _primary_super_limit = 8 };
@ -597,6 +600,9 @@ protected:
// garbage collection support
void oops_do(OopClosure* cl);
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return ClassType; }
// Iff the class loader (or mirror for anonymous classes) is alive the
// Klass is considered alive.
// The is_alive closure passed in depends on the Garbage Collector used.

View File

@ -196,6 +196,7 @@ class vtableEntry VALUE_OBJ_CLASS_SPEC {
static int method_offset_in_bytes() { return offset_of(vtableEntry, _method); }
Method* method() const { return _method; }
Method** method_addr() { return &_method; }
private:
Method* _method;
@ -236,6 +237,7 @@ class itableOffsetEntry VALUE_OBJ_CLASS_SPEC {
int _offset;
public:
Klass* interface_klass() const { return _interface; }
Klass**interface_klass_addr() { return &_interface; }
int offset() const { return _offset; }
static itableMethodEntry* method_entry(Klass* k, int offset) { return (itableMethodEntry*)(((address)k) + offset); }
@ -258,6 +260,7 @@ class itableMethodEntry VALUE_OBJ_CLASS_SPEC {
public:
Method* method() const { return _method; }
Method**method_addr() { return &_method; }
void clear() { _method = NULL; }

View File

@ -48,8 +48,10 @@ class Metadata : public MetaspaceObj {
virtual bool is_methodData() const volatile { return false; }
virtual bool is_constantPool() const volatile { return false; }
virtual bool is_methodCounters() const volatile { return false; }
virtual int size() const = 0;
virtual MetaspaceObj::Type type() const = 0;
virtual const char* internal_name() const = 0;
virtual void metaspace_pointers_do(MetaspaceClosure* iter) {}
void print() const { print_on(tty); }
void print_value() const { print_value_on(tty); }

View File

@ -37,6 +37,7 @@
#include "interpreter/oopMapCache.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -79,7 +80,7 @@ Method* Method::allocate(ClassLoaderData* loader_data,
method_type,
CHECK_NULL);
int size = Method::size(access_flags.is_native());
return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
return new (loader_data, size, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
}
Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
@ -305,6 +306,14 @@ Symbol* Method::klass_name() const {
}
void Method::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(Method): %p", this);
it->push(&_constMethod);
it->push(&_method_data);
it->push(&_method_counters);
}
// Attempt to return method oop to original state. Clear any pointers
// (to objects outside the shared spaces). We won't be able to predict
// where they should point in a new JVM. Further initialize some

View File

@ -66,6 +66,8 @@ class Method : public Metadata {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
// If you add a new field that points to any metaspace object, you
// must add this field to Method::metaspace_pointers_do().
ConstMethod* _constMethod; // Method read-only data.
MethodData* _method_data;
MethodCounters* _method_counters;
@ -471,6 +473,9 @@ class Method : public Metadata {
// clear entry points. Used by sharing code during dump time
void unlink_method() NOT_CDS_RETURN;
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return MethodType; }
// vtable index
enum VtableIndexFlag {
// Valid vtable indexes are non-negative (>= 0).

View File

@ -22,12 +22,13 @@
*
*/
#include "precompiled.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/methodCounters.hpp"
#include "runtime/handles.inline.hpp"
MethodCounters* MethodCounters::allocate(const methodHandle& mh, TRAPS) {
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
return new(loader_data, method_counters_size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
}
void MethodCounters::clear_counters() {
@ -73,6 +74,12 @@ void MethodCounters::set_highest_osr_comp_level(int level) {
#endif
}
// Report this MethodCounters' metaspace pointers. The back-link to the
// owning Method exists only in AOT builds (see the #if INCLUDE_AOT field
// declaration), so there is nothing to push otherwise.
void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodCounters): %p", this);
#if INCLUDE_AOT
  it->push(&_method);
#endif
}
void MethodCounters::print_value_on(outputStream* st) const {
assert(is_methodCounters(), "must be methodCounters");

View File

@ -35,6 +35,8 @@ class MethodCounters : public Metadata {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
// If you add a new field that points to any metaspace object, you
// must add this field to MethodCounters::metaspace_pointers_do().
#if INCLUDE_AOT
Method* _method; // Back link to Method
#endif
@ -118,10 +120,14 @@ class MethodCounters : public Metadata {
AOT_ONLY(Method* method() const { return _method; })
static int size() {
static int method_counters_size() {
return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
}
virtual int size() const {
return method_counters_size();
}
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return MethodCountersType; }
void clear_counters();
#if defined(COMPILER2) || INCLUDE_JVMCI

View File

@ -29,6 +29,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
@ -715,7 +716,7 @@ void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) con
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
int size = MethodData::compute_allocation_size_in_words(method);
return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
MethodData(method(), size, THREAD);
}
@ -1634,6 +1635,11 @@ bool MethodData::profile_parameters_for_method(const methodHandle& m) {
return m->is_compiled_lambda_form();
}
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(MethodData): %p", this);
it->push(&_method);
}
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
if (shift == 0) {
return;

View File

@ -2154,6 +2154,9 @@ private:
friend class ProfileData;
friend class TypeEntriesAtCall;
// If you add a new field that points to any metaspace object, you
// must add this field to MethodData::metaspace_pointers_do().
// Back pointer to the Method*
Method* _method;
@ -2591,6 +2594,9 @@ public:
return byte_offset_of(MethodData, _parameters_type_data_di);
}
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return MethodDataType; }
// Deallocation support - no pointer fields to deallocate
void deallocate_contents(ClassLoaderData* loader_data) {}

View File

@ -32,6 +32,7 @@
#include "gc/shared/specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/arrayKlass.inline.hpp"
@ -398,6 +399,12 @@ void ObjArrayKlass::initialize(TRAPS) {
bottom_klass()->initialize(THREAD); // dispatches to either InstanceKlass or TypeArrayKlass
}
void ObjArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
ArrayKlass::metaspace_pointers_do(it);
it->push(&_element_klass);
it->push(&_bottom_klass);
}
// JVM support
jint ObjArrayKlass::compute_modifier_flags(TRAPS) const {

View File

@ -35,6 +35,8 @@ class ObjArrayKlass : public ArrayKlass {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
// If you add a new field that points to any metaspace object, you
// must add this field to ObjArrayKlass::metaspace_pointers_do().
Klass* _element_klass; // The klass of the elements of this array type
Klass* _bottom_klass; // The one-dimensional type (InstanceKlass or TypeArrayKlass)
@ -80,6 +82,8 @@ class ObjArrayKlass : public ArrayKlass {
// Compute protection domain
oop protection_domain() const { return bottom_klass()->protection_domain(); }
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
private:
// Either oop or narrowOop depending on UseCompressedOops.
// must be called from within ObjArrayKlass.cpp

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,8 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/symbol.hpp"
@ -53,13 +55,6 @@ void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
return res;
}
void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
address res;
res = (address) Metaspace::allocate(loader_data, size(len), true,
MetaspaceObj::SymbolType, CHECK_NULL);
return res;
}
void Symbol::operator delete(void *p) {
assert(((Symbol*)p)->refcount() == 0, "should not call this");
FreeHeap(p);
@ -235,6 +230,15 @@ void Symbol::decrement_refcount() {
}
}
void Symbol::metaspace_pointers_do(MetaspaceClosure* it) {
if (log_is_enabled(Trace, cds)) {
LogStream trace_stream(Log(cds)::trace());
trace_stream.print("Iter(Symbol): %p ", this);
print_value_on(&trace_stream);
trace_stream.cr();
}
}
void Symbol::print_on(outputStream* st) const {
if (this == NULL) {
st->print_cr("NULL");

View File

@ -119,9 +119,13 @@ class Symbol : public MetaspaceObj {
max_symbol_length = (1 << 16) -1
};
static int byte_size(int length) {
// minimum number of natural words needed to hold these bits (no non-heap version)
return (int)(sizeof(Symbol) + (length > 2 ? length - 2 : 0));
}
static int size(int length) {
// minimum number of natural words needed to hold these bits (no non-heap version)
return (int)heap_word_size(sizeof(Symbol) + (length > 2 ? length - 2 : 0));
return (int)heap_word_size(byte_size(length));
}
void byte_at_put(int index, int value) {
@ -141,6 +145,10 @@ class Symbol : public MetaspaceObj {
const jbyte* base() const { return &_body[0]; }
int size() { return size(utf8_length()); }
int byte_size() { return byte_size(utf8_length()); }
// Symbols should be stored in the read-only region of CDS archive.
static bool is_read_only_by_default() { return true; }
// Returns the largest size symbol we can safely hold.
static int max_length() { return max_symbol_length; }
@ -164,6 +172,9 @@ class Symbol : public MetaspaceObj {
_refcount = PERM_REFCOUNT;
}
}
bool is_permanent() {
return (_refcount == PERM_REFCOUNT);
}
int byte_at(int index) const {
assert(index >=0 && index < _length, "symbol index overflow");
@ -227,6 +238,9 @@ class Symbol : public MetaspaceObj {
const char* as_klass_external_name() const;
const char* as_klass_external_name(char* buf, int size) const;
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return SymbolType; }
// Printing
void print_symbol_on(outputStream* st = NULL) const;
void print_utf8_on(outputStream* st) const;

View File

@ -1454,7 +1454,7 @@ WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_load
? java_lang_ClassLoader::loader_data(class_loader_oop)
: ClassLoaderData::the_null_class_loader_data();
void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
void* metadata = MetadataFactory::new_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
return (jlong)(uintptr_t)metadata;
WB_END
@ -1553,6 +1553,9 @@ WB_ENTRY(jboolean, WB_MetaspaceShouldConcurrentCollect(JNIEnv* env, jobject wb))
return MetaspaceGC::should_concurrent_collect();
WB_END
WB_ENTRY(jlong, WB_MetaspaceReserveAlignment(JNIEnv* env, jobject wb))
return (jlong)Metaspace::reserve_alignment();
WB_END
WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
@ -1937,6 +1940,7 @@ static JNINativeMethod methods[] = {
{CC"incMetaspaceCapacityUntilGC", CC"(J)J", (void*)&WB_IncMetaspaceCapacityUntilGC },
{CC"metaspaceCapacityUntilGC", CC"()J", (void*)&WB_MetaspaceCapacityUntilGC },
{CC"metaspaceShouldConcurrentCollect", CC"()Z", (void*)&WB_MetaspaceShouldConcurrentCollect },
{CC"metaspaceReserveAlignment", CC"()J", (void*)&WB_MetaspaceReserveAlignment },
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
{CC"getNMethod0", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
(void*)&WB_GetNMethod },

View File

@ -130,36 +130,3 @@ Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose) {
return Flag::SUCCESS;
}
}
static inline Flag::Error sharedConstraintFunc(const char *name, size_t value, size_t taken, bool verbose) {
size_t available = (MAX_SHARED_DELTA-(taken+SHARED_PAGE));
if (value > available) {
CommandLineError::print(verbose,
"%s (" SIZE_FORMAT ") must be "
"smaller than or equal to (" SIZE_FORMAT ")\n",
name, value, available);
return Flag::VIOLATES_CONSTRAINT;
} else {
return Flag::SUCCESS;
}
}
Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose) {
size_t taken = (SharedReadOnlySize+SharedMiscDataSize+SharedMiscCodeSize);
return sharedConstraintFunc("SharedReadWriteSize", value, taken, verbose);
}
Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose) {
size_t taken = (SharedReadWriteSize+SharedMiscDataSize+SharedMiscCodeSize);
return sharedConstraintFunc("SharedReadOnlySize", value, taken, verbose);
}
Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose) {
size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscCodeSize);
return sharedConstraintFunc("SharedMiscDataSize", value, taken, verbose);
}
Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose) {
size_t taken = (SharedReadWriteSize+SharedReadOnlySize+SharedMiscDataSize);
return sharedConstraintFunc("SharedMiscCodeSize", value, taken, verbose);
}

View File

@ -45,9 +45,4 @@ Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose);
Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
Flag::Error SharedReadWriteSizeConstraintFunc(size_t value, bool verbose);
Flag::Error SharedReadOnlySizeConstraintFunc(size_t value, bool verbose);
Flag::Error SharedMiscDataSizeConstraintFunc(size_t value, bool verbose);
Flag::Error SharedMiscCodeSizeConstraintFunc(size_t value, bool verbose);
#endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */

View File

@ -3900,25 +3900,17 @@ public:
"If PrintSharedArchiveAndExit is true, also print the shared " \
"dictionary") \
\
product(size_t, SharedReadWriteSize, DEFAULT_SHARED_READ_WRITE_SIZE, \
"Size of read-write space for metadata (in bytes)") \
range(MIN_SHARED_READ_WRITE_SIZE, MAX_SHARED_READ_WRITE_SIZE) \
constraint(SharedReadWriteSizeConstraintFunc,AfterErgo) \
product(size_t, SharedReadWriteSize, 0, \
"Deprecated") \
\
product(size_t, SharedReadOnlySize, DEFAULT_SHARED_READ_ONLY_SIZE, \
"Size of read-only space for metadata (in bytes)") \
range(MIN_SHARED_READ_ONLY_SIZE, MAX_SHARED_READ_ONLY_SIZE) \
constraint(SharedReadOnlySizeConstraintFunc,AfterErgo) \
product(size_t, SharedReadOnlySize, 0, \
"Deprecated") \
\
product(size_t, SharedMiscDataSize, DEFAULT_SHARED_MISC_DATA_SIZE, \
"Size of the shared miscellaneous data area (in bytes)") \
range(MIN_SHARED_MISC_DATA_SIZE, MAX_SHARED_MISC_DATA_SIZE) \
constraint(SharedMiscDataSizeConstraintFunc,AfterErgo) \
product(size_t, SharedMiscDataSize, 0, \
"Deprecated") \
\
product(size_t, SharedMiscCodeSize, DEFAULT_SHARED_MISC_CODE_SIZE, \
"Size of the shared miscellaneous code area (in bytes)") \
range(MIN_SHARED_MISC_CODE_SIZE, MAX_SHARED_MISC_CODE_SIZE) \
constraint(SharedMiscCodeSizeConstraintFunc,AfterErgo) \
product(size_t, SharedMiscCodeSize, 0, \
"Deprecated") \
\
product(size_t, SharedBaseAddress, LP64_ONLY(32*G) \
NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \

View File

@ -3111,8 +3111,8 @@ void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
void CDSAdapterHandlerEntry::init() {
assert(DumpSharedSpaces, "used during dump time only");
_c2i_entry_trampoline = (address)MetaspaceShared::misc_data_space_alloc(SharedRuntime::trampoline_size());
_adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_data_space_alloc(sizeof(AdapterHandlerEntry*));
_c2i_entry_trampoline = (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size());
_adapter_trampoline = (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*));
};
#endif // INCLUDE_CDS

View File

@ -275,42 +275,6 @@ void report_untested(const char* file, int line, const char* message) {
#endif // !PRODUCT
}
void report_out_of_shared_space(SharedSpaceType shared_space) {
if (shared_space == SharedOptional) {
// The estimated shared_optional_space size is large enough
// for all class bytes. It should not run out of space.
ShouldNotReachHere();
}
static const char* name[] = {
"shared read only space",
"shared read write space",
"shared miscellaneous data space",
"shared miscellaneous code space"
};
static const char* flag[] = {
"SharedReadOnlySize",
"SharedReadWriteSize",
"SharedMiscDataSize",
"SharedMiscCodeSize"
};
warning("\nThe %s is not large enough\n"
"to preload requested classes. Use -XX:%s=<size>\n"
"to increase the initial size of %s.\n",
name[shared_space], flag[shared_space], name[shared_space]);
exit(2);
}
void report_insufficient_metaspace(size_t required_size) {
warning("\nThe MaxMetaspaceSize of " SIZE_FORMAT " bytes is not large enough.\n"
"Either don't specify the -XX:MaxMetaspaceSize=<size>\n"
"or increase the size to at least " SIZE_FORMAT ".\n",
MaxMetaspaceSize, required_size);
exit(2);
}
void report_java_out_of_memory(const char* message) {
static jint out_of_memory_reported = 0;

View File

@ -184,19 +184,6 @@ template<> struct STATIC_ASSERT_FAILURE<true> { enum { value = 1 }; };
typedef char PASTE_TOKENS(STATIC_ASSERT_DUMMY_TYPE_, __LINE__)[ \
STATIC_ASSERT_FAILURE< (Cond) >::value ]
// out of shared space reporting
enum SharedSpaceType {
SharedReadOnly,
SharedReadWrite,
SharedMiscData,
SharedMiscCode,
SharedOptional
};
void report_out_of_shared_space(SharedSpaceType space_type);
void report_insufficient_metaspace(size_t required_size);
// out of memory reporting
void report_java_out_of_memory(const char* message);

View File

@ -198,30 +198,39 @@ template <MEMFLAGS F> void BasicHashtable<F>::bulk_free_entries(BucketUnlinkCont
}
Atomic::add(-context->_num_removed, &_number_of_entries);
}
// Copy the table to the shared space.
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_table() {
size_t bytes = 0;
bytes += sizeof(intptr_t); // len
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end) {
for (int i = 0; i < _table_size; ++i) {
for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
*p != NULL;
p = (*p)->next_addr()) {
bytes += entry_size();
}
}
// Dump the hash table entries.
return bytes;
}
intptr_t *plen = (intptr_t*)(*top);
*top += sizeof(*plen);
// Dump the hash table entries (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char* top, char* end) {
assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
intptr_t *plen = (intptr_t*)(top);
top += sizeof(*plen);
int i;
for (i = 0; i < _table_size; ++i) {
for (BasicHashtableEntry<F>** p = _buckets[i].entry_addr();
*p != NULL;
p = (*p)->next_addr()) {
if (*top + entry_size() > end) {
report_out_of_shared_space(SharedMiscData);
}
*p = (BasicHashtableEntry<F>*)memcpy(*top, (void*)*p, entry_size());
*top += entry_size();
*p != NULL;
p = (*p)->next_addr()) {
*p = (BasicHashtableEntry<F>*)memcpy(top, (void*)*p, entry_size());
top += entry_size();
}
}
*plen = (char*)(*top) - (char*)plen - sizeof(*plen);
*plen = (char*)(top) - (char*)plen - sizeof(*plen);
assert(top == end, "count_bytes_for_table is wrong");
// Set the shared bit.
for (i = 0; i < _table_size; ++i) {
@ -272,7 +281,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print_table_statistics(outp
for (int i = 0; i < this->table_size(); ++i) {
int count = 0;
for (HashtableEntry<T, F>* e = this->bucket(i);
e != NULL; e = e->next()) {
e != NULL; e = e->next()) {
count++;
literal_bytes += literal_size(e->literal());
}
@ -305,19 +314,29 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print_table_statistics(outp
// Dump the hash table buckets.
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
template <MEMFLAGS F> size_t BasicHashtable<F>::count_bytes_for_buckets() {
size_t bytes = 0;
bytes += sizeof(intptr_t); // len
bytes += sizeof(intptr_t); // _number_of_entries
bytes += _table_size * sizeof(HashtableBucket<F>); // the buckets
return bytes;
}
// Dump the buckets (into CDS archive)
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char* top, char* end) {
assert(is_aligned(top, sizeof(intptr_t)), "bad alignment");
intptr_t len = _table_size * sizeof(HashtableBucket<F>);
*(intptr_t*)(*top) = len;
*top += sizeof(intptr_t);
*(intptr_t*)(top) = len;
top += sizeof(intptr_t);
*(intptr_t*)(*top) = _number_of_entries;
*top += sizeof(intptr_t);
*(intptr_t*)(top) = _number_of_entries;
top += sizeof(intptr_t);
if (*top + len > end) {
report_out_of_shared_space(SharedMiscData);
}
_buckets = (HashtableBucket<F>*)memcpy(*top, (void*)_buckets, len);
*top += len;
_buckets = (HashtableBucket<F>*)memcpy(top, (void*)_buckets, len);
top += len;
assert(top == end, "count_bytes_for_buckets is wrong");
}
#ifndef PRODUCT
@ -397,6 +416,7 @@ template class Hashtable<Symbol*, mtClass>;
template class HashtableEntry<Symbol*, mtSymbol>;
template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>;
template class HashtableBucket<mtClass>;
template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>;
template class BasicHashtable<mtClass>;

View File

@ -148,8 +148,10 @@ public:
HashtableBucket<F>* buckets, int number_of_entries);
// Sharing support.
void copy_buckets(char** top, char* end);
void copy_table(char** top, char* end);
size_t count_bytes_for_buckets();
size_t count_bytes_for_table();
void copy_buckets(char* top, char* end);
void copy_table(char* top, char* end);
// Bucket handling
int hash_to_index(unsigned int full_hash) const {

View File

@ -77,12 +77,6 @@ public class TestOptionsWithRanges {
allOptionsAsMap = JVMOptionsUtils.getOptionsWithRangeAsMap(origin -> (!(origin.contains("develop") || origin.contains("notproduct"))));
/* Shared flags can cause JVM to exit with error code 2 */
setAllowedExitCodes("SharedReadWriteSize", 2);
setAllowedExitCodes("SharedReadOnlySize", 2);
setAllowedExitCodes("SharedMiscDataSize", 2);
setAllowedExitCodes("SharedMiscCodeSize", 2);
/*
* Remove CICompilerCount from testing because currently it can hang system
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -232,10 +232,6 @@ public class JVMOptionsUtils {
case "NewSizeThreadIncrease":
option.addPrepend("-XX:+UseSerialGC");
break;
case "SharedReadWriteSize":
case "SharedReadOnlySize":
case "SharedMiscDataSize":
case "SharedMiscCodeSize":
case "SharedBaseAddress":
case "SharedSymbolTableBucketSize":
option.addPrepend("-XX:+UnlockDiagnosticVMOptions");

View File

@ -1,86 +0,0 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test LargeSharedSpace
* @bug 8168790 8169870
* @summary Test CDS dumping using specific space size without crashing.
* The space size used in the test might not be suitable on windows.
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @requires (os.family != "windows")
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @run main LargeSharedSpace
*/
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.Platform;
public class LargeSharedSpace {
public static void main(String[] args) throws Exception {
OutputAnalyzer output;
// Test case 1: -XX:SharedMiscCodeSize=1066924031
//
// The archive should be dumped successfully. It might fail to reserve memory
// for shared space under low memory condition. The dumping process should not crash.
output = CDSTestUtils.createArchive("-XX:SharedMiscCodeSize=1066924031",
"-XX:+UnlockDiagnosticVMOptions");
try {
output.shouldContain("Loading classes to share");
} catch (RuntimeException e1) {
output.shouldContain("Unable to allocate memory for shared space");
}
// Test case 2: -XX:SharedMiscCodeSize=1600386047
//
// On 64-bit platform, compressed class pointer is used. When the combined
// shared space size and the compressed space size is larger than the 4G
// compressed klass limit (0x100000000), error is reported.
//
// The dumping process should not crash.
if (Platform.is64bit()) {
CDSTestUtils.createArchive(
"-XX:+UseCompressedClassPointers", "-XX:CompressedClassSpaceSize=3G",
"-XX:SharedMiscCodeSize=1600386047")
.shouldContain("larger than compressed klass limit");
}
// Test case 3: -XX:SharedMiscCodeSize=1600386047
//
// On 32-bit platform, compressed class pointer is not used. It may fail
// to reserve memory under low memory condition.
//
// The dumping process should not crash.
if (Platform.is32bit()) {
output = CDSTestUtils.createArchive("-XX:SharedMiscCodeSize=1600386047");
try {
output.shouldContain("Loading classes to share");
} catch (RuntimeException e3) {
output.shouldContain("Unable to allocate memory for shared space");
}
}
}
}

View File

@ -1,197 +0,0 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test LimitSharedSizes
* @summary Test handling of limits on shared space size
* @requires (vm.opt.UseCompressedOops == null) | (vm.opt.UseCompressedOops == true)
* @library /test/lib /runtime/CommandLine/OptionsValidation/common
* @modules java.base/jdk.internal.misc
* java.management
* jdk.attach/sun.tools.attach
* @run main LimitSharedSizes
*/
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.Platform;
import optionsvalidation.JVMOptionsUtils;
public class LimitSharedSizes {
static enum Result {
OUT_OF_RANGE,
TOO_SMALL,
VALID,
VALID_ARCHIVE
}
static enum Region {
RO, RW, MD, MC
}
private static final boolean fitsRange(String name, String value) throws RuntimeException {
boolean fits = true;
try {
fits = JVMOptionsUtils.fitsRange(name, value);
} catch (Exception e) {
throw new RuntimeException(e.getMessage());
}
return fits;
}
private static class SharedSizeTestData {
public String optionName;
public String optionValue;
public Result optionResult;
public SharedSizeTestData(Region region, String value) {
optionName = "-XX:"+getName(region);
optionValue = value;
if (fitsRange(getName(region), value) == false) {
optionResult = Result.OUT_OF_RANGE;
} else {
optionResult = Result.TOO_SMALL;
}
}
public SharedSizeTestData(Region region, String value, Result result) {
optionName = "-XX:"+getName(region);
optionValue = value;
optionResult = result;
}
private String getName(Region region) {
String name;
switch (region) {
case RO:
name = "SharedReadOnlySize";
break;
case RW:
name = "SharedReadWriteSize";
break;
case MD:
name = "SharedMiscDataSize";
break;
case MC:
name = "SharedMiscCodeSize";
break;
default:
name = "Unknown";
break;
}
return name;
}
public Result getResult() {
return optionResult;
}
}
private static final SharedSizeTestData[] testTable = {
// Too small of a region size should not cause a vm crash.
// It should result in an error message either like the following #1:
// The shared miscellaneous code space is not large enough
// to preload requested classes. Use -XX:SharedMiscCodeSize=
// to increase the initial size of shared miscellaneous code space.
// or #2:
// The shared miscellaneous code space is outside the allowed range
new SharedSizeTestData(Region.RO, "4M"),
new SharedSizeTestData(Region.RW, "4M"),
new SharedSizeTestData(Region.MD, "50k"),
new SharedSizeTestData(Region.MC, "20k"),
// these values are larger than default ones, and should
// be acceptable and not cause failure
new SharedSizeTestData(Region.RO, "20M", Result.VALID),
new SharedSizeTestData(Region.RW, "20M", Result.VALID),
new SharedSizeTestData(Region.MD, "20M", Result.VALID),
new SharedSizeTestData(Region.MC, "20M", Result.VALID),
// test with sizes which just meet the minimum required sizes
// the following tests also attempt to use the shared archive
new SharedSizeTestData(Region.RO, Platform.is64bit() ? "14M":"9M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.RW, Platform.is64bit() ? "12M":"7M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.MD, Platform.is64bit() ? "4M":"2M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.MC, "120k", Result.VALID_ARCHIVE),
};
public static void main(String[] args) throws Exception {
int counter = 0;
for (SharedSizeTestData td : testTable) {
String fileName = "LimitSharedSizes" + counter + ".jsa";
counter++;
String option = td.optionName + "=" + td.optionValue;
System.out.println("testing option number <" + counter + ">");
System.out.println("testing option <" + option + ">");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
option,
"-Xshare:dump");
OutputAnalyzer output = CDSTestUtils.executeAndLog(pb, "dump" + counter);
switch (td.getResult()) {
case VALID:
case VALID_ARCHIVE:
{
output.shouldNotContain("space is not large enough");
output.shouldHaveExitValue(0);
if (td.getResult() == Result.VALID_ARCHIVE) {
// try to use the archive
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-XX:+PrintSharedArchiveAndExit",
"-version");
output = CDSTestUtils.executeAndLog(pb, "use" + counter);
if(CDSTestUtils.isUnableToMap(output)) {
System.out.println("Unable to use shared archive: " +
"test not executed; assumed passed");
continue;
} else {
output.shouldHaveExitValue(0);
}
}
}
break;
case TOO_SMALL:
{
output.shouldContain("space is not large enough");
output.shouldHaveExitValue(2);
}
break;
case OUT_OF_RANGE:
{
output.shouldContain("outside the allowed range");
output.shouldHaveExitValue(1);
}
break;
}
}
}
}

View File

@ -35,11 +35,9 @@ import jdk.test.lib.process.ProcessTools;
public class MaxMetaspaceSize {
public static void main(String[] args) throws Exception {
String msg = "is not large enough.\n" +
"Either don't specify the -XX:MaxMetaspaceSize=<size>\n" +
"or increase the size to at least";
String msg = "OutOfMemoryError: Metaspace";
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:MaxMetaspaceSize=10m", "-Xshare:dump");
CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(2);
"-XX:MaxMetaspaceSize=1m", "-Xshare:dump");
CDSTestUtils.executeAndLog(pb, "dump").shouldContain(msg).shouldHaveExitValue(1);
}
}

View File

@ -38,6 +38,11 @@ public class SharedStringsWb {
String s = "<init>";
String internedS = s.intern();
// Check that it's a valid string
if (s.getClass() != String.class || !(s instanceof String)) {
throw new RuntimeException("Shared string is not a valid String: FAIL");
}
if (wb.isShared(internedS)) {
System.out.println("Found shared string, result: PASS");
} else {

View File

@ -28,70 +28,82 @@
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @run main SpaceUtilizationCheck
* @build sun.hotspot.WhiteBox
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SpaceUtilizationCheck
*/
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.OutputAnalyzer;
import sun.hotspot.WhiteBox;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.util.ArrayList;
import java.util.Hashtable;
import java.lang.Integer;
public class SpaceUtilizationCheck {
// Minimum allowed utilization value (percent)
// The goal is to have this number to be 50% for RO and RW regions
// Once that feature is implemented, increase the MIN_UTILIZATION to 50
private static final int MIN_UTILIZATION = 30;
// Only RO and RW regions are considered for this check, since they
// currently account for the bulk of the shared space
private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;
// [1] Each region must have strictly less than
// WhiteBox.metaspaceReserveAlignment() bytes of unused space.
// [2] There must be no gap between two consecutive regions.
public static void main(String[] args) throws Exception {
OutputAnalyzer output = CDSTestUtils.createArchive();
// (1) Default VM arguments
test();
// (2) Use the now deprecated VM arguments. They should have no effect.
test("-XX:SharedReadWriteSize=128M",
"-XX:SharedReadOnlySize=128M",
"-XX:SharedMiscDataSize=128M",
"-XX:SharedMiscCodeSize=128M");
}
static void test(String... extra_options) throws Exception {
OutputAnalyzer output = CDSTestUtils.createArchive(extra_options);
CDSTestUtils.checkDump(output);
Pattern pattern = Pattern.compile("^(..) space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a0-f]+)");
WhiteBox wb = WhiteBox.getWhiteBox();
long reserve_alignment = wb.metaspaceReserveAlignment();
System.out.println("Metaspace::reserve_alignment() = " + reserve_alignment);
String stdout = output.getStdout();
ArrayList<String> utilization = findUtilization(stdout);
if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS )
throw new RuntimeException("The output format of sharing summary has changed");
for(String str : utilization) {
int value = Integer.parseInt(str);
if (value < MIN_UTILIZATION) {
System.out.println(stdout);
throw new RuntimeException("Utilization for one of the regions" +
"is below a threshold of " + MIN_UTILIZATION + "%");
long last_region = -1;
Hashtable<String,String> checked = new Hashtable<>();
for (String line : output.getStdout().split("\n")) {
if (line.contains(" space:") && !line.contains("st space:")) {
Matcher matcher = pattern.matcher(line);
if (matcher.find()) {
String name = matcher.group(1);
if (name.equals("s0") || name.equals("s1")) {
// String regions are listed at the end and they may not be fully occupied.
break;
} else {
System.out.println("Checking " + name + " in : " + line);
checked.put(name, name);
}
long used = Long.parseLong(matcher.group(2));
long capacity = Long.parseLong(matcher.group(3));
long address = Long.parseLong(matcher.group(4), 16);
long unused = capacity - used;
if (unused < 0) {
throw new RuntimeException("Unused space (" + unused + ") less than 0");
}
if (unused > reserve_alignment) {
// [1] Check for unused space
throw new RuntimeException("Unused space (" + unused + ") must be smaller than Metaspace::reserve_alignment() (" +
reserve_alignment + ")");
}
if (last_region >= 0 && address != last_region) {
// [2] Check for no-gap
throw new RuntimeException("Region 0x" + address + " should have started at 0x" + Long.toString(last_region, 16));
}
last_region = address + capacity;
}
}
}
}
public static ArrayList<String> findUtilization(String input) {
ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
}
private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
ArrayList<String> result = new ArrayList<String>();
for (String str : input) {
Matcher matcher = Pattern.compile(pattern).matcher(str);
if (matcher.find()) {
result.add(matcher.group());
}
if (checked.size() != 5) {
throw new RuntimeException("Must have 5 consecutive, fully utilized regions");
}
return result;
}
private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
ArrayList<String> result = new ArrayList<String>();
for (String str : inputLines) {
if (str.contains("ro space:") || str.contains("rw space:")) {
result.add(str);
}
}
return result;
}
}