8261608: Move common CDS archive building code to archiveBuilder.cpp

Reviewed-by: coleenp, ccheung
Author: Ioi Lam
Date:   2021-02-15 06:37:39 +00:00
parent 235da6aa04
commit d9744f6536
31 changed files with 502 additions and 637 deletions

View File

@ -27,10 +27,9 @@
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "logging/logMessage.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/vmThread.hpp"
@ -74,11 +73,11 @@ CompactHashtableWriter::~CompactHashtableWriter() {
size_t CompactHashtableWriter::estimate_size(int num_entries) {
int num_buckets = calculate_num_buckets(num_entries);
size_t bucket_bytes = MetaspaceShared::ro_array_bytesize<u4>(num_buckets + 1);
size_t bucket_bytes = ArchiveBuilder::ro_array_bytesize<u4>(num_buckets + 1);
// In worst case, we have no VALUE_ONLY_BUCKET_TYPE, so each entry takes 2 slots
int entries_space = 2 * num_entries;
size_t entry_bytes = MetaspaceShared::ro_array_bytesize<u4>(entries_space);
size_t entry_bytes = ArchiveBuilder::ro_array_bytesize<u4>(entries_space);
return bucket_bytes
+ entry_bytes
@ -109,8 +108,8 @@ void CompactHashtableWriter::allocate_table() {
"Too many entries.");
}
_compact_buckets = MetaspaceShared::new_ro_array<u4>(_num_buckets + 1);
_compact_entries = MetaspaceShared::new_ro_array<u4>(entries_space);
_compact_buckets = ArchiveBuilder::new_ro_array<u4>(_num_buckets + 1);
_compact_entries = ArchiveBuilder::new_ro_array<u4>(entries_space);
_stats->bucket_count = _num_buckets;
_stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,10 @@ public:
int hashentry_bytes;
int bucket_count;
int bucket_bytes;
CompactHashtableStats() :
hashentry_count(0), hashentry_bytes(0),
bucket_count(0), bucket_bytes(0) {}
};
#if INCLUDE_CDS

View File

@ -31,7 +31,6 @@
#include "classfile/classLoadInfo.hpp"
#include "classfile/klassFactory.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiRedefineClasses.hpp"

View File

@ -33,7 +33,6 @@
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@ -385,7 +384,7 @@ static ArchivedModuleEntries* _archive_modules_entries = NULL;
ModuleEntry* ModuleEntry::allocate_archived_entry() const {
assert(is_named(), "unnamed packages/modules are not archived");
ModuleEntry* archived_entry = (ModuleEntry*)MetaspaceShared::read_write_space_alloc(sizeof(ModuleEntry));
ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
if (_archive_modules_entries == NULL) {
@ -410,7 +409,7 @@ Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry
Array<ModuleEntry*>* archived_array = NULL;
int length = (array == NULL) ? 0 : array->length();
if (length > 0) {
archived_array = MetaspaceShared::new_ro_array<ModuleEntry*>(length);
archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
@ -518,7 +517,7 @@ void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
}
Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
Array<ModuleEntry*>* archived_modules = MetaspaceShared::new_rw_array<ModuleEntry*>(number_of_entries());
Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(number_of_entries());
int n = 0;
for (int i = 0; i < table_size(); ++i) {
for (ModuleEntry* m = bucket(i); m != NULL; m = m->next()) {

View File

@ -30,7 +30,6 @@
#include "logging/log.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
@ -209,7 +208,7 @@ static ArchivedPackageEntries* _archived_packages_entries = NULL;
PackageEntry* PackageEntry::allocate_archived_entry() const {
assert(!in_unnamed_module(), "unnamed packages/modules are not archived");
PackageEntry* archived_entry = (PackageEntry*)MetaspaceShared::read_write_space_alloc(sizeof(PackageEntry));
PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
if (_archived_packages_entries == NULL) {
@ -279,7 +278,7 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
}
}
Array<PackageEntry*>* archived_packages = MetaspaceShared::new_rw_array<PackageEntry*>(n);
Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
for (n = 0, i = 0; i < table_size(); ++i) {
for (PackageEntry* p = bucket(i); p != NULL; p = p->next()) {
if (p->module()->name() != NULL) {

View File

@ -34,7 +34,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@ -760,7 +760,7 @@ void StringTable::write_to_archive(const DumpedInternedStrings* dumped_interned_
assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
_shared_table.reset();
CompactHashtableWriter writer(_items_count, &MetaspaceShared::stats()->string);
CompactHashtableWriter writer(_items_count, ArchiveBuilder::string_stats());
// Copy the interned strings into the "string space" within the java heap
CopyToArchive copier(&writer);

View File

@ -32,7 +32,6 @@
#include "memory/archiveBuilder.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@ -602,8 +601,7 @@ size_t SymbolTable::estimate_size_for_archive() {
}
void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
CompactHashtableWriter writer(int(_items_count),
&MetaspaceShared::stats()->symbol);
CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats());
copy_shared_symbol_table(symbols, &writer);
if (!DynamicDumpSharedSpaces) {
_shared_table.reset();

View File

@ -2038,7 +2038,7 @@ public:
log_info(cds,dynamic)("Archiving hidden %s", info._proxy_klasses->at(0)->external_name());
size_t byte_size = sizeof(RunTimeLambdaProxyClassInfo);
RunTimeLambdaProxyClassInfo* runtime_info =
(RunTimeLambdaProxyClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
(RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
runtime_info->init(key, info);
unsigned int hash = runtime_info->hash();
u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
@ -2086,7 +2086,7 @@ public:
if (!info.is_excluded() && info.is_builtin() == _is_builtin) {
size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints());
RunTimeSharedClassInfo* record;
record = (RunTimeSharedClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
record = (RunTimeSharedClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
record->init(info);
unsigned int hash;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"

View File

@ -27,6 +27,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
@ -44,6 +45,7 @@
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/hashtable.inline.hpp"
ArchiveBuilder* ArchiveBuilder::_current = NULL;
@ -69,7 +71,7 @@ class AdapterToTrampoline : public ResourceHashtable<
static AdapterToTrampoline* _adapter_to_trampoline = NULL;
ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
char* newtop = ArchiveBuilder::current()->_ro_region->top();
char* newtop = ArchiveBuilder::current()->_ro_region.top();
ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}
@ -159,37 +161,40 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
_ptrmap.iterate(&relocator, start, end);
}
ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
: _rw_src_objs(), _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE) {
assert(_current == NULL, "must be");
_current = this;
ArchiveBuilder::ArchiveBuilder() :
_current_dump_space(NULL),
_buffer_bottom(NULL),
_last_verified_top(NULL),
_num_dump_regions_used(0),
_other_region_used_bytes(0),
_requested_static_archive_bottom(NULL),
_requested_static_archive_top(NULL),
_requested_dynamic_archive_bottom(NULL),
_requested_dynamic_archive_top(NULL),
_mapped_static_archive_bottom(NULL),
_mapped_static_archive_top(NULL),
_buffer_to_requested_delta(0),
_mc_region("mc", MAX_SHARED_DELTA),
_rw_region("rw", MAX_SHARED_DELTA),
_ro_region("ro", MAX_SHARED_DELTA),
_rw_src_objs(),
_ro_src_objs(),
_src_obj_table(INITIAL_TABLE_SIZE),
_num_instance_klasses(0),
_num_obj_array_klasses(0),
_num_type_array_klasses(0),
_total_closed_heap_region_size(0),
_total_open_heap_region_size(0),
_estimated_metaspaceobj_bytes(0),
_estimated_hashtable_bytes(0),
_estimated_trampoline_bytes(0)
{
_klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
_symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
_special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);
_num_instance_klasses = 0;
_num_obj_array_klasses = 0;
_num_type_array_klasses = 0;
_alloc_stats = new (ResourceObj::C_HEAP, mtClassShared) DumpAllocStats;
_mc_region = mc_region;
_rw_region = rw_region;
_ro_region = ro_region;
_num_dump_regions_used = 0;
_estimated_metaspaceobj_bytes = 0;
_estimated_hashtable_bytes = 0;
_estimated_trampoline_bytes = 0;
_requested_static_archive_bottom = NULL;
_requested_static_archive_top = NULL;
_mapped_static_archive_bottom = NULL;
_mapped_static_archive_top = NULL;
_requested_dynamic_archive_bottom = NULL;
_requested_dynamic_archive_top = NULL;
_buffer_to_requested_delta = 0;
assert(_current == NULL, "must be");
_current = this;
}
ArchiveBuilder::~ArchiveBuilder() {
@ -205,7 +210,10 @@ ArchiveBuilder::~ArchiveBuilder() {
delete _klasses;
delete _symbols;
delete _special_refs;
delete _alloc_stats;
}
bool ArchiveBuilder::is_dumping_full_module_graph() {
return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
}
class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
@ -261,7 +269,7 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit, /*is_relocating_pointers=*/false);
#if INCLUDE_CDS_JAVA_HEAP
if (DumpSharedSpaces && MetaspaceShared::use_full_module_graph()) {
if (is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
@ -335,7 +343,7 @@ size_t ArchiveBuilder::estimate_archive_size() {
size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
_estimated_hashtable_bytes = symbol_table_est + dictionary_est;
_estimated_trampoline_bytes = allocate_method_trampoline_info();
_estimated_trampoline_bytes = collect_method_trampolines();
size_t total = 0;
@ -366,18 +374,18 @@ address ArchiveBuilder::reserve_buffer() {
// buffer_bottom is the lowest address of the 3 core regions (mc, rw, ro) when
// we are copying the class metadata into the buffer.
address buffer_bottom = (address)rs.base();
log_info(cds)("Reserved output buffer space at : " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
p2i(buffer_bottom), buffer_size);
MetaspaceShared::set_shared_rs(rs);
_shared_rs = rs;
MetaspaceShared::init_shared_dump_space(_mc_region);
_buffer_bottom = buffer_bottom;
_last_verified_top = buffer_bottom;
_current_dump_space = _mc_region;
_current_dump_space = &_mc_region;
_num_dump_regions_used = 1;
_other_region_used_bytes = 0;
_current_dump_space->init(&_shared_rs, &_shared_vs);
ArchivePtrMarker::initialize(&_ptrmap, (address*)_mc_region->base(), (address*)_mc_region->top());
ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
// The bottom of the static archive should be mapped at this address by default.
_requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
@ -520,6 +528,7 @@ void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::R
void ArchiveBuilder::gather_source_objs() {
ResourceMark rm;
log_info(cds)("Gathering all archivable objects ... ");
gather_klasses_and_symbols();
GatherSortedSourceObjs doit(this);
iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
doit.finish();
@ -565,16 +574,61 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
}
// Close out the current dump region and make 'next' the region that subsequent
// allocations go into. Also folds any bytes used in the current region since the
// last size-verification checkpoint into _other_region_used_bytes so that
// verify_estimate_size() accounts for them.
void ArchiveBuilder::start_dump_space(DumpRegion* next) {
address bottom = _last_verified_top;
address top = (address)(current_dump_space()->top());
// Bytes consumed in the outgoing region since the last checkpoint.
_other_region_used_bytes += size_t(top - bottom);
// pack() finalizes the current region and positions 'next' after it.
current_dump_space()->pack(next);
_current_dump_space = next;
_num_dump_regions_used ++;
// Reset the checkpoint to the top of the new current region.
_last_verified_top = (address)(current_dump_space()->top());
}
// Check that the actual space consumed since the last checkpoint does not exceed
// 'estimate'. 'which' names the category being verified (used in the log line).
// Consumption = bytes used in the current dump region since _last_verified_top,
// plus bytes accumulated in _other_region_used_bytes by start_dump_space().
// Asserts if the estimate was too small, then resets both counters for the next
// verification interval.
void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
address bottom = _last_verified_top;
address top = (address)(current_dump_space()->top());
size_t used = size_t(top - bottom) + _other_region_used_bytes;
int diff = int(estimate) - int(used);
log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
assert(diff >= 0, "Estimate is too small");
// Start a fresh verification interval.
_last_verified_top = top;
_other_region_used_bytes = 0;
}
// Copy all objects destined for the read-write (rw) region into the archive
// buffer. When dumping the full module graph, also archives the ModuleEntry/
// PackageEntry tables of the built-in class loaders into rw.
void ArchiveBuilder::dump_rw_region() {
ResourceMark rm;
log_info(cds)("Allocating RW objects ... ");
// NOTE(review): the next line is the pre-change (removed) version of the call
// below; this diff rendering retained both the '-' and '+' lines.
make_shallow_copies(_rw_region, &_rw_src_objs);
start_dump_space(&_rw_region);
make_shallow_copies(&_rw_region, &_rw_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (is_dumping_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = rw_region()->top();
ClassLoaderDataShared::allocate_archived_tables();
// Record the bytes the module tables consumed, attributed to the rw region.
alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
}
#endif
}
// Copy all objects destined for the read-only (ro) region into the archive
// buffer. When dumping the full module graph, also initializes the archived
// module/package tables inside ro.
void ArchiveBuilder::dump_ro_region() {
ResourceMark rm;
log_info(cds)("Allocating RO objects ... ");
// NOTE(review): the next line is the pre-change (removed) version of the call
// below; this diff rendering retained both the '-' and '+' lines.
make_shallow_copies(_ro_region, &_ro_src_objs);
start_dump_space(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
if (is_dumping_full_module_graph()) {
char* start = ro_region()->top();
ClassLoaderDataShared::init_archived_tables();
// Record the bytes the archived tables consumed, attributed to the ro region.
alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
}
#endif
}
void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
@ -619,7 +673,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
src_info->set_dumped_addr((address)dest);
_alloc_stats->record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
_alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
}
address ArchiveBuilder::get_dumped_addr(address src_obj) const {
@ -821,6 +875,8 @@ class RelocateBufferToRequested : public BitMapClosure {
void ArchiveBuilder::relocate_to_requested() {
ro_region()->pack();
size_t my_archive_size = buffer_top() - buffer_bottom();
if (DumpSharedSpaces) {
@ -989,9 +1045,9 @@ public:
write_header(mapinfo);
write_data(header, header_end, 0);
DumpRegion* mc_region = builder->_mc_region;
DumpRegion* rw_region = builder->_rw_region;
DumpRegion* ro_region = builder->_ro_region;
DumpRegion* mc_region = &builder->_mc_region;
DumpRegion* rw_region = &builder->_rw_region;
DumpRegion* ro_region = &builder->_ro_region;
address mc = address(mc_region->base());
address mc_end = address(mc_region->end());
@ -1019,18 +1075,8 @@ public:
}
};
void ArchiveBuilder::write_cds_map_to_log(FileMapInfo* mapinfo,
GrowableArray<MemRegion> *closed_heap_regions,
GrowableArray<MemRegion> *open_heap_regions,
char* bitmap, size_t bitmap_size_in_bytes) {
if (log_is_enabled(Info, cds, map)) {
CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
bitmap, bitmap_size_in_bytes);
}
}
void ArchiveBuilder::print_stats(int ro_all, int rw_all, int mc_all) {
_alloc_stats->print_stats(ro_all, rw_all, mc_all);
void ArchiveBuilder::print_stats() {
_alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used()));
}
void ArchiveBuilder::clean_up_src_obj_table() {
@ -1038,6 +1084,20 @@ void ArchiveBuilder::clean_up_src_obj_table() {
_src_obj_table.iterate(&cleaner);
}
// Initialize the misc-code (mc) region. For a static dump this reserves the
// i2i entry code buffer (one trampoline slot per interpreter method entry) and
// a 16-byte pad at the very bottom; then trampolines for archived methods are
// allocated for both static and dynamic dumps.
void ArchiveBuilder::init_mc_region() {
if (DumpSharedSpaces) { // these are needed only for static archive
// We don't want any valid object to be at the very bottom of the archive.
// See ArchivePtrMarker::mark_pointer().
mc_region()->allocate(16);
size_t trampoline_size = SharedRuntime::trampoline_size();
// One trampoline per abstract-interpreter method entry kind.
size_t buf_size = (size_t)AbstractInterpreter::number_of_method_entries * trampoline_size;
MetaspaceShared::set_i2i_entry_code_buffers((address)mc_region()->allocate(buf_size));
}
allocate_method_trampolines();
}
void ArchiveBuilder::allocate_method_trampolines_for(InstanceKlass* ik) {
if (ik->methods() != NULL) {
for (int j = 0; j < ik->methods()->length(); j++) {
@ -1048,9 +1108,9 @@ void ArchiveBuilder::allocate_method_trampolines_for(InstanceKlass* ik) {
MethodTrampolineInfo* info = _adapter_to_trampoline->get(ent);
if (info->c2i_entry_trampoline() == NULL) {
info->set_c2i_entry_trampoline(
(address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()));
(address)mc_region()->allocate(SharedRuntime::trampoline_size()));
info->set_adapter_trampoline(
(AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*)));
(AdapterHandlerEntry**)mc_region()->allocate(sizeof(AdapterHandlerEntry*)));
}
}
}
@ -1069,7 +1129,7 @@ void ArchiveBuilder::allocate_method_trampolines() {
// Allocate MethodTrampolineInfo for all Methods that will be archived. Also
// return the total number of bytes needed by the method trampolines in the MC
// region.
size_t ArchiveBuilder::allocate_method_trampoline_info() {
size_t ArchiveBuilder::collect_method_trampolines() {
size_t total = 0;
size_t each_method_bytes =
align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
@ -1124,6 +1184,123 @@ void ArchiveBuilder::update_method_trampolines() {
}
}
// Write the completed archive to disk via 'mapinfo':
//   1. the three core regions (mc, rw, ro),
//   2. the relocation bitmap region (plus any heap oopmaps),
//   3. the closed/open Java heap regions (if heap archiving produced any),
//   4. the file header (with CRC), then close the file.
// Also prints region statistics and, when cds+map logging is enabled, a full
// CDS map log. 'closed_heap_regions' may be NULL when no heap objects were
// archived; in that case the heap oopmaps are presumably empty as well —
// TODO(review): confirm against the callers.
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions,
GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps) {
// Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
// MetaspaceShared::n_regions (internal to hotspot).
assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
// mc contains the trampoline code for method entries, which are patched at run time,
// so it needs to be read/write.
write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
// The bitmap buffer is C-heap allocated by write_bitmap_region(); freed below.
size_t bitmap_size_in_bytes;
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_oopmaps, open_heap_oopmaps,
bitmap_size_in_bytes);
if (closed_heap_regions != NULL) {
// Sizes are remembered so print_region_stats() can include them in totals.
_total_closed_heap_region_size = mapinfo->write_archive_heap_regions(
closed_heap_regions,
closed_heap_oopmaps,
MetaspaceShared::first_closed_archive_heap_region,
MetaspaceShared::max_closed_archive_heap_region);
_total_open_heap_region_size = mapinfo->write_archive_heap_regions(
open_heap_regions,
open_heap_oopmaps,
MetaspaceShared::first_open_archive_heap_region,
MetaspaceShared::max_open_archive_heap_region);
}
print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
// Header is written last so it can carry the requested base and a CRC over
// the finished contents.
mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
mapinfo->set_header_crc(mapinfo->compute_header_crc());
mapinfo->write_header();
mapinfo->close();
if (log_is_enabled(Info, cds)) {
print_stats();
}
if (log_is_enabled(Info, cds, map)) {
CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
bitmap, bitmap_size_in_bytes);
}
FREE_C_HEAP_ARRAY(char, bitmap);
}
// Thin wrapper: write one dump region into archive slot 'region_idx' with the
// given protection flags (read_only / allow_exec).
void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
// Log (cds, debug level) per-region used/reserved statistics for the core
// regions (mc, rw, ro), the bitmap region, and any archived heap regions,
// followed by an overall total and utilization percentage.
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions) {
// Print statistics of all the regions
const size_t bitmap_used = mapinfo->space_at(MetaspaceShared::bm)->used();
const size_t bitmap_reserved = mapinfo->space_at(MetaspaceShared::bm)->used_aligned();
const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
_mc_region.reserved() +
bitmap_reserved +
_total_closed_heap_region_size +
_total_open_heap_region_size;
const size_t total_bytes = _ro_region.used() + _rw_region.used() +
_mc_region.used() +
bitmap_used +
_total_closed_heap_region_size +
_total_open_heap_region_size;
const double total_u_perc = percent_of(total_bytes, total_reserved);
_mc_region.print(total_reserved);
_rw_region.print(total_reserved);
_ro_region.print(total_reserved);
print_bitmap_region_stats(bitmap_used, total_reserved);
// Heap regions exist only when heap objects were archived; closed implies open
// was written too (see write_archive).
if (closed_heap_regions != NULL) {
print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
print_heap_region_stats(open_heap_regions, "oa", total_reserved);
}
log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
total_bytes, total_reserved, total_u_perc);
}
// Log one stats line for the bitmap (bm) region. The region is always reported
// as 100% used: the same 'size' is printed for both used and capacity.
void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
size, size/double(total_size)*100.0, size);
}
// Log one stats line per archived heap MemRegion in 'heap_mem'. 'name' is the
// two-letter region prefix ("ca"/"oa"); heap regions are reported as 100% used
// (size is printed as both used and capacity). Tolerates heap_mem == NULL.
void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
const char *name, size_t total_size) {
int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
for (int i = 0; i < arr_len; i++) {
char* start = (char*)heap_mem->at(i).start();
size_t size = heap_mem->at(i).byte_size();
// NOTE(review): 'top' is computed but never used in this function.
char* top = start + size;
log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
name, i, size, size/double(total_size)*100.0, size, p2i(start));
}
}
// Fatal-exit path when a dump region cannot satisfy an allocation of
// 'needed_bytes'. Prints the out-of-space state of all three core regions
// before terminating the VM; does not return.
void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
// This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
// On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
// or so.
_mc_region.print_out_of_space_msg(name, needed_bytes);
_rw_region.print_out_of_space_msg(name, needed_bytes);
_ro_region.print_out_of_space_msg(name, needed_bytes);
vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
"Please reduce the number of shared classes.");
}
#ifndef PRODUCT
void ArchiveBuilder::assert_is_vm_thread() {
assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");

View File

@ -26,7 +26,9 @@
#define SHARE_MEMORY_ARCHIVEBUILDER_HPP
#include "memory/archiveUtils.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/array.hpp"
#include "oops/klass.hpp"
#include "runtime/os.hpp"
#include "utilities/bitMap.hpp"
@ -34,13 +36,17 @@
#include "utilities/hashtable.hpp"
#include "utilities/resourceHash.hpp"
struct ArchiveHeapOopmapInfo;
class CHeapBitMap;
class DumpAllocStats;
class FileMapInfo;
class Klass;
class MemRegion;
class Symbol;
// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
// We enforce the same alignment rule in blocks allocated from the shared space.
const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
// Overview of CDS archive creation (for both static and dynamic dump):
//
// [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
@ -186,9 +192,12 @@ private:
static const int INITIAL_TABLE_SIZE = 15889;
static const int MAX_TABLE_SIZE = 1000000;
DumpRegion* _mc_region;
DumpRegion* _rw_region;
DumpRegion* _ro_region;
ReservedSpace _shared_rs;
VirtualSpace _shared_vs;
DumpRegion _mc_region;
DumpRegion _rw_region;
DumpRegion _ro_region;
CHeapBitMap _ptrmap; // bitmap used by ArchivePtrMarker
SourceObjList _rw_src_objs; // objs to put in rw region
@ -202,7 +211,16 @@ private:
int _num_instance_klasses;
int _num_obj_array_klasses;
int _num_type_array_klasses;
DumpAllocStats* _alloc_stats;
DumpAllocStats _alloc_stats;
size_t _total_closed_heap_region_size;
size_t _total_open_heap_region_size;
void print_region_stats(FileMapInfo *map_info,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions);
void print_bitmap_region_stats(size_t size, size_t total_size);
void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
const char *name, size_t total_size);
// For global access.
static ArchiveBuilder* _current;
@ -215,12 +233,13 @@ public:
char* _oldtop;
public:
OtherROAllocMark() {
_oldtop = _current->_ro_region->top();
_oldtop = _current->_ro_region.top();
}
~OtherROAllocMark();
};
private:
bool is_dumping_full_module_graph();
FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);
void iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers);
@ -254,8 +273,10 @@ protected:
return os::vm_allocation_granularity();
}
void start_dump_space(DumpRegion* next);
void verify_estimate_size(size_t estimate, const char* which);
public:
void set_current_dump_space(DumpRegion* r) { _current_dump_space = r; }
address reserve_buffer();
address buffer_bottom() const { return _buffer_bottom; }
@ -317,7 +338,7 @@ public:
static void assert_is_vm_thread() PRODUCT_RETURN;
public:
ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region);
ArchiveBuilder();
~ArchiveBuilder();
void gather_klasses_and_symbols();
@ -327,6 +348,43 @@ public:
void add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset);
void remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref);
DumpRegion* mc_region() { return &_mc_region; }
DumpRegion* rw_region() { return &_rw_region; }
DumpRegion* ro_region() { return &_ro_region; }
static char* mc_region_alloc(size_t num_bytes) {
return current()->mc_region()->allocate(num_bytes);
}
static char* rw_region_alloc(size_t num_bytes) {
return current()->rw_region()->allocate(num_bytes);
}
static char* ro_region_alloc(size_t num_bytes) {
return current()->ro_region()->allocate(num_bytes);
}
template <typename T>
static Array<T>* new_ro_array(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
Array<T>* array = (Array<T>*)ro_region_alloc(byte_size);
array->initialize(length);
return array;
}
template <typename T>
static Array<T>* new_rw_array(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
Array<T>* array = (Array<T>*)rw_region_alloc(byte_size);
array->initialize(length);
return array;
}
template <typename T>
static size_t ro_array_bytesize(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
return align_up(byte_size, SharedSpaceObjectAlignment);
}
void init_mc_region();
void dump_rw_region();
void dump_ro_region();
void relocate_metaspaceobj_embedded_pointers();
@ -334,10 +392,13 @@ public:
void relocate_vm_classes();
void make_klasses_shareable();
void relocate_to_requested();
void write_cds_map_to_log(FileMapInfo* mapinfo,
GrowableArray<MemRegion> *closed_heap_regions,
GrowableArray<MemRegion> *open_heap_regions,
char* bitmap, size_t bitmap_size_in_bytes);
void write_archive(FileMapInfo* mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions,
GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps);
void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
bool read_only, bool allow_exec);
address get_dumped_addr(address src_obj) const;
@ -356,7 +417,15 @@ public:
}
static DumpAllocStats* alloc_stats() {
return current()->_alloc_stats;
return &(current()->_alloc_stats);
}
static CompactHashtableStats* symbol_stats() {
return alloc_stats()->symbol_stats();
}
static CompactHashtableStats* string_stats() {
return alloc_stats()->string_stats();
}
void relocate_klass_ptr(oop o);
@ -371,12 +440,13 @@ public:
return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
}
void print_stats(int ro_all, int rw_all, int mc_all);
void print_stats();
void report_out_of_space(const char* name, size_t needed_bytes);
// Method trampolines related functions
size_t collect_method_trampolines();
void allocate_method_trampolines();
void allocate_method_trampolines_for(InstanceKlass* ik);
size_t allocate_method_trampoline_info();
void update_method_trampolines();
};

View File

@ -40,14 +40,13 @@
#include "utilities/bitMap.inline.hpp"
CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
address* ArchivePtrMarker::_ptr_base;
address* ArchivePtrMarker::_ptr_end;
VirtualSpace* ArchivePtrMarker::_vs;
bool ArchivePtrMarker::_compacted;
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
assert(_ptrmap == NULL, "initialize only once");
_ptr_base = ptr_base;
_ptr_end = ptr_end;
_vs = vs;
_compacted = false;
_ptrmap = ptrmap;
@ -66,17 +65,17 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(!_compacted, "cannot mark anymore");
if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) {
if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
address value = *ptr_loc;
// We don't want any pointer that points to very bottom of the archive, otherwise when
// MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
// to nothing (NULL) vs a pointer to an objects that happens to be at the very bottom
// of the archive.
assert(value != (address)_ptr_base, "don't point to the bottom of the archive");
assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
if (value != NULL) {
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
size_t idx = ptr_loc - _ptr_base;
size_t idx = ptr_loc - ptr_base();
if (_ptrmap->size() <= idx) {
_ptrmap->resize((idx + 1) * 2);
}
@ -91,9 +90,9 @@ void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(!_compacted, "cannot clear anymore");
assert(_ptr_base <= ptr_loc && ptr_loc < _ptr_end, "must be");
assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
size_t idx = ptr_loc - _ptr_base;
size_t idx = ptr_loc - ptr_base();
assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
_ptrmap->clear_bit(idx);
//tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
@ -132,7 +131,7 @@ public:
void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
assert(!_compacted, "cannot compact again");
ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end);
ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
_ptrmap->iterate(&cleaner);
compact(cleaner.max_non_null_offset());
}
@ -147,16 +146,16 @@ char* DumpRegion::expand_top_to(char* newtop) {
assert(is_allocatable(), "must be initialized and not packed");
assert(newtop >= _top, "must not grow backwards");
if (newtop > _end) {
MetaspaceShared::report_out_of_space(_name, newtop - _top);
ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
ShouldNotReachHere();
}
MetaspaceShared::commit_to(_rs, _vs, newtop);
commit_to(newtop);
_top = newtop;
if (_rs == MetaspaceShared::shared_rs()) {
if (_max_delta > 0) {
uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
if (delta > ArchiveBuilder::MAX_SHARED_DELTA) {
if (delta > _max_delta) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
@ -168,6 +167,39 @@ char* DumpRegion::expand_top_to(char* newtop) {
return _top;
}
void DumpRegion::commit_to(char* newtop) {
Arguments::assert_is_dumping_archive();
char* base = _rs->base();
size_t need_committed_size = newtop - base;
size_t has_committed_size = _vs->committed_size();
if (need_committed_size < has_committed_size) {
return;
}
size_t min_bytes = need_committed_size - has_committed_size;
size_t preferred_bytes = 1 * M;
size_t uncommitted = _vs->reserved_size() - has_committed_size;
size_t commit = MAX2(min_bytes, preferred_bytes);
commit = MIN2(commit, uncommitted);
assert(commit <= uncommitted, "sanity");
if (!_vs->expand_by(commit, false)) {
vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
need_committed_size));
}
const char* which;
if (_rs->base() == (char*)MetaspaceShared::symbol_rs_base()) {
which = "symbol";
} else {
which = "shared";
}
log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
which, commit, _vs->actual_committed_size(), _vs->high());
}
char* DumpRegion::allocate(size_t num_bytes) {
char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment);
char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);

View File

@ -27,6 +27,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.hpp"
@ -39,15 +40,18 @@ class VirtualSpace;
// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_prt_base <= ptr_loc < _ptr_end). _ptr_base is
// fixed, but _ptr_end can be expanded as more objects are dumped.
class ArchivePtrMarker : AllStatic {
static CHeapBitMap* _ptrmap;
static address* _ptr_base;
static address* _ptr_end;
static CHeapBitMap* _ptrmap;
static VirtualSpace* _vs;
// Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
// avoid unintentional copy operations after the bitmap has been finalized and written.
static bool _compacted;
static address* ptr_base() { return (address*)_vs->low(); } // committed lower bound (inclusive)
static address* ptr_end() { return (address*)_vs->high(); } // committed upper bound (exclusive)
public:
static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end);
static void initialize(CHeapBitMap* ptrmap, VirtualSpace* vs);
static void mark_pointer(address* ptr_loc);
static void clear_pointer(address* ptr_loc);
static void compact(address relocatable_base, address relocatable_end);
@ -64,11 +68,6 @@ public:
mark_pointer(ptr_loc);
}
static void expand_ptr_end(address *new_ptr_end) {
assert(_ptr_end <= new_ptr_end, "must be");
_ptr_end = new_ptr_end;
}
static CHeapBitMap* ptrmap() {
return _ptrmap;
}
@ -128,12 +127,17 @@ private:
char* _base;
char* _top;
char* _end;
uintx _max_delta;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;
void commit_to(char* newtop);
public:
DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
DumpRegion(const char* name, uintx max_delta = 0)
: _name(name), _base(NULL), _top(NULL), _end(NULL),
_max_delta(max_delta), _is_packed(false) {}
char* expand_top_to(char* newtop);
char* allocate(size_t num_bytes);

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/cppVtables.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
@ -100,7 +101,7 @@ template <class T>
CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
int n = get_vtable_length(name);
CppVtableInfo* info =
(CppVtableInfo*)MetaspaceShared::misc_code_dump_space()->allocate(CppVtableInfo::byte_size(n));
(CppVtableInfo*)ArchiveBuilder::current()->mc_region()->allocate(CppVtableInfo::byte_size(n));
info->set_vtable_size(n);
initialize(name, info);
return info;
@ -214,7 +215,7 @@ CppVtableInfo** CppVtables::_index = NULL;
char* CppVtables::dumptime_init() {
assert(DumpSharedSpaces, "must");
size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*);
_index = (CppVtableInfo**)MetaspaceShared::misc_code_dump_space()->allocate(vtptrs_bytes);
_index = (CppVtableInfo**)ArchiveBuilder::current()->mc_region()->allocate(vtptrs_bytes);
CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,25 +26,21 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/metaspaceShared.hpp"
void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
// Calculate size of data that was not allocated by Metaspace::allocate()
MetaspaceSharedStats *stats = MetaspaceShared::stats();
// symbols
_counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
_bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
_counts[RO][SymbolHashentryType] = _symbol_stats.hashentry_count;
_bytes [RO][SymbolHashentryType] = _symbol_stats.hashentry_bytes;
_counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
_bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
_counts[RO][SymbolBucketType] = _symbol_stats.bucket_count;
_bytes [RO][SymbolBucketType] = _symbol_stats.bucket_bytes;
// strings
_counts[RO][StringHashentryType] = stats->string.hashentry_count;
_bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
_counts[RO][StringHashentryType] = _string_stats.hashentry_count;
_bytes [RO][StringHashentryType] = _string_stats.hashentry_bytes;
_counts[RO][StringBucketType] = stats->string.bucket_count;
_bytes [RO][StringBucketType] = stats->string.bucket_bytes;
_counts[RO][StringBucketType] = _string_stats.bucket_count;
_bytes [RO][StringBucketType] = _string_stats.bucket_bytes;
// TODO: count things like dictionary, vtable, etc
_bytes[RW][OtherType] += mc_all;
@ -70,7 +66,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
LogMessage(cds) msg;
msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
msg.debug("Detailed metadata info (excluding heap regions; rw stats include mc regions):");
msg.debug("%s", hdr);
msg.debug("%s", sep);
for (int type = 0; type < int(_number_of_types); type ++) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#define SHARE_MEMORY_DUMPALLOCSTATS_HPP
#include "memory/allocation.hpp"
#include "classfile/compactHashtable.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
@ -57,17 +58,23 @@ public:
}
}
public:
enum { RO = 0, RW = 1 };
CompactHashtableStats _symbol_stats;
CompactHashtableStats _string_stats;
int _counts[2][_number_of_types];
int _bytes [2][_number_of_types];
public:
enum { RO = 0, RW = 1 };
DumpAllocStats() {
memset(_counts, 0, sizeof(_counts));
memset(_bytes, 0, sizeof(_bytes));
};
CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
CompactHashtableStats* string_stats() { return &_string_stats; }
void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;

View File

@ -92,35 +92,7 @@ public:
void write_archive(char* serialized_data);
public:
DynamicArchiveBuilder() : ArchiveBuilder(MetaspaceShared::misc_code_dump_space(),
MetaspaceShared::read_write_dump_space(),
MetaspaceShared::read_only_dump_space()) {
}
void start_dump_space(DumpRegion* next) {
address bottom = _last_verified_top;
address top = (address)(current_dump_space()->top());
_other_region_used_bytes += size_t(top - bottom);
MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
_current_dump_space = next;
_num_dump_regions_used ++;
_last_verified_top = (address)(current_dump_space()->top());
}
void verify_estimate_size(size_t estimate, const char* which) {
address bottom = _last_verified_top;
address top = (address)(current_dump_space()->top());
size_t used = size_t(top - bottom) + _other_region_used_bytes;
int diff = int(estimate) - int(used);
log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
assert(diff >= 0, "Estimate is too small");
_last_verified_top = top;
_other_region_used_bytes = 0;
}
DynamicArchiveBuilder() : ArchiveBuilder() { }
// Do this before and after the archive dump to see if any corruption
// is caused by dynamic dumping.
@ -140,27 +112,16 @@ public:
DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
SystemDictionaryShared::check_excluded_classes();
gather_klasses_and_symbols();
// mc space starts ...
reserve_buffer();
init_header();
allocate_method_trampolines();
verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");
gather_source_objs();
// rw space starts ...
start_dump_space(MetaspaceShared::read_write_dump_space());
reserve_buffer();
init_mc_region();
verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");
log_info(cds, dynamic)("Copying %d klasses and %d symbols",
klasses()->length(), symbols()->length());
dump_rw_region();
// ro space starts ...
DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
start_dump_space(ro_space);
dump_ro_region();
relocate_metaspaceobj_embedded_pointers();
relocate_roots();
@ -173,12 +134,14 @@ public:
// Note that these tables still point to the *original* objects, so
// they would need to call DynamicArchive::original_to_target() to
// get the correct addresses.
assert(current_dump_space() == ro_space, "Must be RO space");
assert(current_dump_space() == ro_region(), "Must be RO space");
SymbolTable::write_to_archive(symbols());
ArchiveBuilder::OtherROAllocMark mark;
SystemDictionaryShared::write_to_archive(false);
serialized_data = ro_space->top();
WriteClosure wc(ro_space);
serialized_data = ro_region()->top();
WriteClosure wc(ro_region());
SymbolTable::serialize_shared_table_header(&wc, false);
SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
}
@ -333,9 +296,6 @@ void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k,
}
void DynamicArchiveBuilder::write_archive(char* serialized_data) {
int num_klasses = klasses()->length();
int num_symbols = symbols()->length();
Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
SharedPathTable runtime_table(table, FileMapInfo::shared_path_table().size());
_header->set_shared_path_table(runtime_table);
@ -344,19 +304,8 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
assert(dynamic_info != NULL, "Sanity");
// Now write the archived data including the file offsets.
const char* archive_name = Arguments::GetSharedDynamicArchivePath();
dynamic_info->open_for_write(archive_name);
size_t bitmap_size_in_bytes;
char* bitmap = MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL, bitmap_size_in_bytes);
dynamic_info->set_requested_base((char*)MetaspaceShared::requested_base_address());
dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
dynamic_info->write_header();
dynamic_info->close();
write_cds_map_to_log(dynamic_info, NULL, NULL,
bitmap, bitmap_size_in_bytes);
FREE_C_HEAP_ARRAY(char, bitmap);
dynamic_info->open_for_write(Arguments::GetSharedDynamicArchivePath());
ArchiveBuilder::write_archive(dynamic_info, NULL, NULL, NULL, NULL);
address base = _requested_dynamic_archive_bottom;
address top = _requested_dynamic_archive_top;
@ -366,13 +315,13 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
" [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
p2i(base), p2i(top), _header->header_size(), file_size);
log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
log_info(cds, dynamic)("%d klasses; %d symbols", klasses()->length(), symbols()->length());
}
class VM_PopulateDynamicDumpSharedSpace: public VM_GC_Sync_Operation {
DynamicArchiveBuilder* _builder;
DynamicArchiveBuilder builder;
public:
VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : VM_GC_Sync_Operation(), _builder(builder) {}
VM_PopulateDynamicDumpSharedSpace() : VM_GC_Sync_Operation() {}
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
void doit() {
ResourceMark rm;
@ -386,7 +335,7 @@ public:
}
FileMapInfo::check_nonempty_dir_in_shared_path_table();
_builder->doit();
builder.doit();
}
};
@ -397,8 +346,7 @@ void DynamicArchive::dump() {
return;
}
DynamicArchiveBuilder builder;
VM_PopulateDynamicDumpSharedSpace op(&builder);
VM_PopulateDynamicDumpSharedSpace op;
VMThread::execute(&op);
}

View File

@ -298,7 +298,7 @@ oop HeapShared::archive_heap_object(oop obj) {
}
void HeapShared::archive_klass_objects() {
GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
assert(klasses != NULL, "sanity");
for (int i = 0; i < klasses->length(); i++) {
Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
@ -573,7 +573,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
int num_entry_fields = entry_fields->length();
assert(num_entry_fields % 2 == 0, "sanity");
_entry_field_records =
MetaspaceShared::new_ro_array<int>(num_entry_fields);
ArchiveBuilder::new_ro_array<int>(num_entry_fields);
for (int i = 0 ; i < num_entry_fields; i++) {
_entry_field_records->at_put(i, entry_fields->at(i));
}
@ -584,7 +584,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
if (subgraph_object_klasses != NULL) {
int num_subgraphs_klasses = subgraph_object_klasses->length();
_subgraph_object_klasses =
MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
for (int i = 0; i < num_subgraphs_klasses; i++) {
Klass* subgraph_k = subgraph_object_klasses->at(i);
if (log_is_enabled(Info, cds, heap)) {
@ -610,7 +610,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
ArchivedKlassSubGraphInfoRecord* record =
(ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
(ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
record->init(&info);
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);

View File

@ -24,7 +24,6 @@
*/
#include "precompiled.hpp"
#include "memory/metaspace/printMetaspaceInfoKlassClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"
#include "oops/reflectionAccessorImplKlassHelper.hpp"

View File

@ -40,18 +40,15 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.inline.hpp"
#include "memory/cppVtables.hpp"
#include "memory/dumpAllocStats.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/heapShared.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
@ -75,16 +72,12 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#endif
ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
@ -105,63 +98,30 @@ bool MetaspaceShared::_use_full_module_graph = true;
// oa0 - open archive heap space #0
// oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, and ro regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro. The size of these 3 regions
// are page-aligned, and there's no gap between any consecutive regions.
// bm - bitmap for relocating the above 7 regions.
//
// The mc, rw, and ro regions are linearly allocated, in the order of mc->rw->ro.
// These regions are aligned with MetaspaceShared::reserved_space_alignment().
//
// These 3 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
// temporarily allocated outside of the shared regions. Only the method entry
// trampolines are written into the mc region.
// [2] C++ vtables are copied into the mc region.
// [0] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
// temporarily allocated outside of the shared regions.
// [1] We enter a safepoint and allocate a buffer for the mc/rw/ro regions.
// [2] C++ vtables and method trampolines are copied into the mc region.
// [3] ArchiveBuilder copies RW metadata into the rw region.
// [4] ArchiveBuilder copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
// are copied into the ro region as read-only tables.
//
// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 4 regions.
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the mc/rw/ro regions.
static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
first_space->init(&_shared_rs, &_shared_vs);
}
DumpRegion* MetaspaceShared::misc_code_dump_space() {
return &_mc_region;
}
DumpRegion* MetaspaceShared::read_write_dump_space() {
return &_rw_region;
}
DumpRegion* MetaspaceShared::read_only_dump_space() {
return &_ro_region;
}
void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
ReservedSpace* rs) {
current->pack(next);
}
static DumpRegion _symbol_region("symbols");
char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
return _symbol_region.allocate(num_bytes);
}
char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
return _mc_region.allocate(num_bytes);
}
char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
return _ro_region.allocate(num_bytes);
}
char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) {
return _rw_region.allocate(num_bytes);
}
size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }
static bool shared_base_valid(char* shared_base) {
@ -316,39 +276,6 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
}
}
void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
Arguments::assert_is_dumping_archive();
char* base = rs->base();
size_t need_committed_size = newtop - base;
size_t has_committed_size = vs->committed_size();
if (need_committed_size < has_committed_size) {
return;
}
size_t min_bytes = need_committed_size - has_committed_size;
size_t preferred_bytes = 1 * M;
size_t uncommitted = vs->reserved_size() - has_committed_size;
size_t commit =MAX2(min_bytes, preferred_bytes);
commit = MIN2(commit, uncommitted);
assert(commit <= uncommitted, "sanity");
bool result = vs->expand_by(commit, false);
if (rs == &_shared_rs) {
ArchivePtrMarker::expand_ptr_end((address*)vs->high());
}
if (!result) {
vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
need_committed_size));
}
assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
which, commit, vs->actual_committed_size(), vs->high());
}
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
@ -395,14 +322,10 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
soc->do_tag(666);
}
void MetaspaceShared::init_misc_code_space() {
// We don't want any valid object to be at the very bottom of the archive.
// See ArchivePtrMarker::mark_pointer().
MetaspaceShared::misc_code_space_alloc(16);
size_t trampoline_size = SharedRuntime::trampoline_size();
size_t buf_size = (size_t)AbstractInterpreter::number_of_method_entries * trampoline_size;
_i2i_entry_code_buffers = (address)misc_code_space_alloc(buf_size);
void MetaspaceShared::set_i2i_entry_code_buffers(address b) {
assert(DumpSharedSpaces, "must be");
assert(_i2i_entry_code_buffers == NULL, "initialize only once");
_i2i_entry_code_buffers = b;
}
address MetaspaceShared::i2i_entry_code_buffers() {
@ -411,14 +334,6 @@ address MetaspaceShared::i2i_entry_code_buffers() {
return _i2i_entry_code_buffers;
}
// Global object for holding classes that have been loaded. Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;
GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
return _global_klass_objects;
}
static void rewrite_nofast_bytecode(const methodHandle& method) {
BytecodeStream bcs(method);
while (!bcs.is_last_bytecode()) {
@ -459,7 +374,7 @@ private:
GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;
void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
void dump_java_heap_objects(GrowableArray<Klass*>* klasses) NOT_CDS_JAVA_HEAP_RETURN;
void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
@ -468,10 +383,6 @@ private:
SymbolTable::write_to_archive(symbols);
}
char* dump_read_only_tables();
void print_region_stats(FileMapInfo* map_info);
void print_bitmap_region_stats(size_t size, size_t total_size);
void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
const char *name, size_t total_size);
public:
@ -488,8 +399,7 @@ public:
class StaticArchiveBuilder : public ArchiveBuilder {
public:
StaticArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
: ArchiveBuilder(mc_region, rw_region, ro_region) {}
StaticArchiveBuilder() : ArchiveBuilder() {}
virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
FileMapInfo::metaspace_pointers_do(it, false);
@ -516,8 +426,9 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
SystemDictionaryShared::write_to_archive();
// Write the other data to the output array.
char* start = _ro_region.top();
WriteClosure wc(&_ro_region);
DumpRegion* ro_region = ArchiveBuilder::current()->ro_region();
char* start = ro_region->top();
WriteClosure wc(ro_region);
MetaspaceShared::serialize(&wc);
// Write the bitmaps for patching the archive heap regions
@ -557,50 +468,21 @@ void VM_PopulateDumpSharedSpace::doit() {
// that so we don't have to walk the SystemDictionary again.
SystemDictionaryShared::check_excluded_classes();
StaticArchiveBuilder builder(&_mc_region, &_rw_region, &_ro_region);
builder.gather_klasses_and_symbols();
builder.reserve_buffer();
_global_klass_objects = builder.klasses();
StaticArchiveBuilder builder;
builder.gather_source_objs();
builder.reserve_buffer();
MetaspaceShared::init_misc_code_space();
builder.allocate_method_trampoline_info();
builder.allocate_method_trampolines();
builder.init_mc_region();
char* cloned_vtables = CppVtables::dumptime_init();
{
_mc_region.pack(&_rw_region);
builder.set_current_dump_space(&_rw_region);
builder.dump_rw_region();
#if INCLUDE_CDS_JAVA_HEAP
if (MetaspaceShared::use_full_module_graph()) {
// Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
char* start = _rw_region.top();
ClassLoaderDataShared::allocate_archived_tables();
ArchiveBuilder::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false);
}
#endif
}
{
_rw_region.pack(&_ro_region);
builder.set_current_dump_space(&_ro_region);
builder.dump_ro_region();
#if INCLUDE_CDS_JAVA_HEAP
if (MetaspaceShared::use_full_module_graph()) {
char* start = _ro_region.top();
ClassLoaderDataShared::init_archived_tables();
ArchiveBuilder::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true);
}
#endif
}
builder.dump_rw_region();
builder.dump_ro_region();
builder.relocate_metaspaceobj_embedded_pointers();
// Dump supported java heap objects
_closed_archive_heap_regions = NULL;
_open_archive_heap_regions = NULL;
dump_java_heap_objects();
dump_java_heap_objects(builder.klasses());
builder.relocate_roots();
dump_shared_symbol_table(builder.symbols());
@ -614,7 +496,6 @@ void VM_PopulateDumpSharedSpace::doit() {
builder.make_klasses_shareable();
char* serialized_data = dump_read_only_tables();
_ro_region.pack();
SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();
@ -626,42 +507,18 @@ void VM_PopulateDumpSharedSpace::doit() {
// without runtime relocation.
builder.relocate_to_requested();
// Create and write the archive file that maps the shared spaces.
// Write the archive file
FileMapInfo* mapinfo = new FileMapInfo(true);
mapinfo->populate_header(os::vm_allocation_granularity());
mapinfo->set_serialized_data(serialized_data);
mapinfo->set_cloned_vtables(cloned_vtables);
mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers());
mapinfo->open_for_write();
size_t bitmap_size_in_bytes;
char* bitmap = MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps,
_open_archive_heap_oopmaps,
bitmap_size_in_bytes);
_total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
_closed_archive_heap_regions,
_closed_archive_heap_oopmaps,
MetaspaceShared::first_closed_archive_heap_region,
MetaspaceShared::max_closed_archive_heap_region);
_total_open_archive_region_size = mapinfo->write_archive_heap_regions(
_open_archive_heap_regions,
_open_archive_heap_oopmaps,
MetaspaceShared::first_open_archive_heap_region,
MetaspaceShared::max_open_archive_heap_region);
mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
mapinfo->set_header_crc(mapinfo->compute_header_crc());
mapinfo->write_header();
print_region_stats(mapinfo);
mapinfo->close();
builder.write_cds_map_to_log(mapinfo, _closed_archive_heap_regions, _open_archive_heap_regions,
bitmap, bitmap_size_in_bytes);
FREE_C_HEAP_ARRAY(char, bitmap);
if (log_is_enabled(Info, cds)) {
builder.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used()));
}
builder.write_archive(mapinfo,
_closed_archive_heap_regions,
_open_archive_heap_regions,
_closed_archive_heap_oopmaps,
_open_archive_heap_oopmaps);
if (PrintSystemDictionaryAtExit) {
SystemDictionary::print();
@ -678,73 +535,6 @@ void VM_PopulateDumpSharedSpace::doit() {
vm_direct_exit(0);
}
void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
// Print statistics of all the regions
const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
_mc_region.reserved() +
bitmap_reserved +
_total_closed_archive_region_size +
_total_open_archive_region_size;
const size_t total_bytes = _ro_region.used() + _rw_region.used() +
_mc_region.used() +
bitmap_used +
_total_closed_archive_region_size +
_total_open_archive_region_size;
const double total_u_perc = percent_of(total_bytes, total_reserved);
_mc_region.print(total_reserved);
_rw_region.print(total_reserved);
_ro_region.print(total_reserved);
print_bitmap_region_stats(bitmap_used, total_reserved);
print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
total_bytes, total_reserved, total_u_perc);
}
// Log the bitmap (bm) region's size and its share of the total archive.
// 'size' is passed both as used and as reserved bytes, so the region is
// always reported as "100.0% used".
void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
size, size/double(total_size)*100.0, size);
}
// Log, for each archived heap MemRegion in 'heap_mem', its size, its share
// of 'total_size', and its start address. 'name' is the region prefix used
// in the log line ("ca" = closed archive, "oa" = open archive, per callers).
// heap_mem may be NULL, which is treated as an empty list.
void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                                         const char *name, size_t total_size) {
  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
  for (int i = 0; i < arr_len; i++) {
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    // 'size' is passed both as used and as reserved bytes, so each heap
    // region is reported as "100.0% used". (Removed the unused local 'top'.)
    log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                   name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
}
// Write the three core metadata regions (mc, rw, ro) plus the bitmap region
// into the archive file. Returns the bitmap buffer, which is allocated from
// the C heap; the caller must free it with FREE_C_HEAP_ARRAY(). The size of
// that buffer is returned via bitmap_size_in_bytes.
char* MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps,
size_t& bitmap_size_in_bytes) {
// Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
// MetaspaceShared::n_regions (internal to hotspot).
assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
// mc contains the trampoline code for method entries, which are patched at run time,
// so it needs to be read/write.
write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
// The bitmap covers the pointers marked via ArchivePtrMarker plus the two
// sets of heap oopmaps.
return mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps,
bitmap_size_in_bytes);
}
// Helper: write a single dump region's used bytes into the archive file at
// slot region_idx, with the given protection/exec attributes.
void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
static GrowableArray<ClassLoaderData*>* _loaded_cld = NULL;
class CollectCLDClosure : public CLDClosure {
@ -987,7 +777,7 @@ bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
}
#if INCLUDE_CDS_JAVA_HEAP
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* klasses) {
if(!HeapShared::is_heap_object_archiving_allowed()) {
log_info(cds)(
"Archived java heap is not supported as UseG1GC, "
@ -999,8 +789,8 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
}
// Find all the interned strings that should be dumped.
int i;
for (i = 0; i < _global_klass_objects->length(); i++) {
Klass* k = _global_klass_objects->at(i);
for (i = 0; i < klasses->length(); i++) {
Klass* k = klasses->at(i);
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
ik->constants()->add_dumped_interned_strings();
@ -1640,18 +1430,6 @@ bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
return true;
}
// Report a fatal out-of-space condition while allocating needed_bytes from
// the dump region called 'name', then exit the VM during initialization.
// Prints the usage of all three core regions first so the user can see
// which one overflowed.
void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
// This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
// On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
// or so.
_mc_region.print_out_of_space_msg(name, needed_bytes);
_rw_region.print_out_of_space_msg(name, needed_bytes);
_ro_region.print_out_of_space_msg(name, needed_bytes);
vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
"Please reduce the number of shared classes.");
}
bool MetaspaceShared::use_full_module_graph() {
#if INCLUDE_CDS_JAVA_HEAP
if (ClassLoaderDataShared::is_full_module_graph_loaded()) {
@ -1669,24 +1447,16 @@ bool MetaspaceShared::use_full_module_graph() {
}
void MetaspaceShared::print_on(outputStream* st) {
if (UseSharedSpaces || DumpSharedSpaces) {
if (UseSharedSpaces) {
st->print("CDS archive(s) mapped at: ");
address base;
address top;
if (UseSharedSpaces) { // Runtime
base = (address)MetaspaceObj::shared_metaspace_base();
address static_top = (address)_shared_metaspace_static_top;
top = (address)MetaspaceObj::shared_metaspace_top();
st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
} else if (DumpSharedSpaces) { // Dump Time
base = (address)_shared_rs.base();
top = (address)_shared_rs.end();
st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
}
address base = (address)MetaspaceObj::shared_metaspace_base();
address static_top = (address)_shared_metaspace_static_top;
address top = (address)MetaspaceObj::shared_metaspace_top();
st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
st->print("size " SIZE_FORMAT ", ", top - base);
st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
} else {
st->print("CDS disabled.");
st->print("CDS archive(s) not mapped");
}
st->cr();
}

View File

@ -25,7 +25,6 @@
#ifndef SHARE_MEMORY_METASPACESHARED_HPP
#define SHARE_MEMORY_METASPACESHARED_HPP
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
@ -33,15 +32,10 @@
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
// We enforce the same alignment rule in blocks allocated from the shared space.
const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
class outputStream;
class CHeapBitMap;
class FileMapInfo;
class DumpRegion;
struct ArchiveHeapOopmapInfo;
class outputStream;
template<class E> class GrowableArray;
enum MapArchiveResult {
MAP_ARCHIVE_SUCCESS,
@ -49,32 +43,14 @@ enum MapArchiveResult {
MAP_ARCHIVE_OTHER_FAILURE
};
// Statistics for the shared symbol and string compact hashtables.
// All fields are zeroed on construction.
class MetaspaceSharedStats {
public:
MetaspaceSharedStats() {
memset(this, 0, sizeof(*this));
}
CompactHashtableStats symbol;
CompactHashtableStats string;
};
// Class Data Sharing Support
class MetaspaceShared : AllStatic {
// CDS support
// Note: _shared_rs and _symbol_rs are only used at dump time.
static ReservedSpace _shared_rs;
static VirtualSpace _shared_vs;
static ReservedSpace _symbol_rs;
static VirtualSpace _symbol_vs;
static int _max_alignment;
static MetaspaceSharedStats _stats;
static ReservedSpace _symbol_rs; // used only during -Xshare:dump
static VirtualSpace _symbol_vs; // used only during -Xshare:dump
static bool _has_error_classes;
static bool _archive_loading_failed;
static bool _remapped_readwrite;
static address _i2i_entry_code_buffers;
static size_t _core_spaces_size;
static void* _shared_metaspace_static_top;
static intx _relocation_delta;
static char* _requested_base_address;
@ -107,22 +83,10 @@ class MetaspaceShared : AllStatic {
static int preload_classes(const char * class_list_path,
TRAPS) NOT_CDS_RETURN_(0);
static GrowableArray<Klass*>* collected_klasses();
static ReservedSpace* shared_rs() {
CDS_ONLY(return &_shared_rs);
NOT_CDS(return NULL);
}
static Symbol* symbol_rs_base() {
return (Symbol*)_symbol_rs.base();
}
static void set_shared_rs(ReservedSpace rs) {
CDS_ONLY(_shared_rs = rs);
}
static void commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) NOT_CDS_RETURN;
static void initialize_for_static_dump() NOT_CDS_RETURN;
static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
static void post_initialize(TRAPS) NOT_CDS_RETURN;
@ -132,10 +96,6 @@ class MetaspaceShared : AllStatic {
static void set_archive_loading_failed() {
_archive_loading_failed = true;
}
static bool is_in_output_space(void* ptr) {
assert(DumpSharedSpaces, "must be");
return shared_rs()->contains(ptr);
}
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
static void initialize_shared_spaces() NOT_CDS_RETURN;
@ -161,12 +121,6 @@ class MetaspaceShared : AllStatic {
static void serialize(SerializeClosure* sc) NOT_CDS_RETURN;
static MetaspaceSharedStats* stats() {
return &_stats;
}
static void report_out_of_space(const char* name, size_t needed_bytes);
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private if
// sharing is enabled. Simply returns true if sharing is not enabled
@ -184,50 +138,15 @@ class MetaspaceShared : AllStatic {
#if INCLUDE_CDS
static size_t reserved_space_alignment();
static void init_shared_dump_space(DumpRegion* first_space);
static DumpRegion* misc_code_dump_space();
static DumpRegion* read_write_dump_space();
static DumpRegion* read_only_dump_space();
static void pack_dump_space(DumpRegion* current, DumpRegion* next,
ReservedSpace* rs);
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
#endif
// Allocate a block of memory from the temporary "symbol" region.
static char* symbol_space_alloc(size_t num_bytes);
// Allocate a block of memory from the "mc" or "ro" regions.
static char* misc_code_space_alloc(size_t num_bytes);
static char* read_only_space_alloc(size_t num_bytes);
static char* read_write_space_alloc(size_t num_bytes);
// Allocate and initialize an Array<T> of 'length' elements in the
// read-only (ro) dump region.
template <typename T>
static Array<T>* new_ro_array(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
Array<T>* array = (Array<T>*)read_only_space_alloc(byte_size);
array->initialize(length);
return array;
}
// Allocate and initialize an Array<T> of 'length' elements in the
// read-write (rw) dump region.
template <typename T>
static Array<T>* new_rw_array(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
Array<T>* array = (Array<T>*)read_write_space_alloc(byte_size);
array->initialize(length);
return array;
}
// Number of bytes needed to store an Array<T> of 'length' elements in the
// ro region, rounded up to SharedSpaceObjectAlignment.
template <typename T>
static size_t ro_array_bytesize(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
return align_up(byte_size, SharedSpaceObjectAlignment);
}
static void init_misc_code_space();
static address i2i_entry_code_buffers();
static void initialize_ptr_marker(CHeapBitMap* ptrmap);
static void set_i2i_entry_code_buffers(address b);
// This is the base address as specified by -XX:SharedBaseAddress during -Xshare:dump.
// Both the base/top archives are written using this as their base address.
@ -254,13 +173,6 @@ class MetaspaceShared : AllStatic {
return is_windows;
}
// Returns the bitmap region which is allocated from C heap.
// Caller must free it with FREE_C_HEAP_ARRAY()
static char* write_core_archive_regions(FileMapInfo* mapinfo,
GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
GrowableArray<ArchiveHeapOopmapInfo>* open_oopmaps,
size_t& bitmap_size_in_bytes);
// Can we skip some expensive operations related to modules?
static bool use_optimized_module_handling() { return NOT_CDS(false) CDS_ONLY(_use_optimized_module_handling); }
static void disable_optimized_module_handling() { _use_optimized_module_handling = false; }
@ -270,10 +182,6 @@ class MetaspaceShared : AllStatic {
static void disable_full_module_graph() { _use_full_module_graph = false; }
private:
#if INCLUDE_CDS
static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
bool read_only, bool allow_exec);
#endif
static void read_extra_data(const char* filename, TRAPS) NOT_CDS_RETURN;
static FileMapInfo* open_static_archive();
static FileMapInfo* open_dynamic_archive();

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_MEMORY_METASPACESHARED_INLINE_HPP
#define SHARE_MEMORY_METASPACESHARED_INLINE_HPP
#include "memory/metaspaceShared.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1Allocator.inline.hpp"
#endif
#if INCLUDE_CDS_JAVA_HEAP
// Returns true if p is an object allocated in an archived (G1) heap region,
// as determined by G1ArchiveAllocator. NULL is never an archive object.
bool MetaspaceShared::is_archive_object(oop p) {
return (p == NULL) ? false : G1ArchiveAllocator::is_archive_object(p);
}
#endif
#endif // SHARE_MEMORY_METASPACESHARED_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,8 @@
template <typename T>
class Array: public MetaspaceObj {
friend class ArchiveBuilder;
friend class MetadataFactory;
friend class MetaspaceShared;
friend class VMStructs;
friend class JVMCIVMStructs;
friend class MethodHandleCompiler; // special case

View File

@ -39,7 +39,6 @@
#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

View File

@ -36,7 +36,6 @@
#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.hpp"

View File

@ -38,7 +38,6 @@
#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

View File

@ -45,7 +45,6 @@
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compiledICHolder.inline.hpp"

View File

@ -28,7 +28,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

View File

@ -49,7 +49,7 @@ public class LotsOfClasses {
opts.addSuffix("-Xlog:hashtables");
opts.addSuffix("-Xmx500m");
opts.addSuffix("-Xlog:gc+region+cds");
opts.addSuffix("-Xlog:gc+region=trace");
opts.addSuffix("-Xlog:cds=debug"); // test detailed metadata info printing
CDSTestUtils.createArchiveAndCheck(opts);
}

View File

@ -75,12 +75,12 @@ public class DynamicLotsOfClasses extends DynamicArchiveTestBase {
"ALL-SYSTEM",
"-Xlog:hashtables",
"-Xmx500m",
"-Xlog:cds,cds+dynamic",
"-Xlog:cds=debug", // test detailed metadata info printing
"-Xlog:cds+dynamic=info",
bootClassPath,
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
"-cp", appJar, mainClass, classList)
.assertNormalExit(output -> {
output.shouldContain("Written dynamic archive 0x");
});
.assertNormalExit("Written dynamic archive 0x",
"Detailed metadata info");
}
}