8251330: Reorder CDS archived heap to speed up relocation

Reviewed-by: iklam, ccheung
This commit is contained in:
Matias Saavedra Silva 2024-03-13 14:00:59 +00:00
parent 7d8561d56b
commit 7e05a70301
9 changed files with 204 additions and 34 deletions

View File

@ -164,18 +164,19 @@ void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
// Optimization: if dumptime shift is the same as runtime shift, we can perform a
// quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
if (_narrow_oop_shift == CompressedOops::shift()) {
uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
log_info(cds)("CDS heap data relocation quick delta = 0x%x", quick_delta);
if (quick_delta == 0) {
log_info(cds)("CDS heap data relocation unnecessary, quick_delta = 0");
} else {
PatchCompressedEmbeddedPointersQuick patcher((narrowOop*)region.start(), quick_delta);
PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
bm.iterate(&patcher);
}
} else {
log_info(cds)("CDS heap data quick relocation not possible");
PatchCompressedEmbeddedPointers patcher((narrowOop*)region.start());
PatchCompressedEmbeddedPointers patcher(patching_start);
bm.iterate(&patcher);
}
}
@ -186,17 +187,10 @@ void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
MemRegion region, address oopmap,
size_t oopmap_size_in_bits) {
BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
#ifndef PRODUCT
ResourceMark rm;
ResourceBitMap checkBm = HeapShared::calculate_oopmap(region);
assert(bm.is_same(checkBm), "sanity");
#endif
if (UseCompressedOops) {
patch_compressed_embedded_pointers(bm, info, region);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)region.start());
PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
bm.iterate(&patcher);
}
}
@ -316,7 +310,7 @@ bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiv
uintptr_t oopmap = bitmap_base + r->oopmap_offset();
BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
PatchLoadedRegionPointers patcher((narrowOop*)load_address, loaded_region);
PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
bm.iterate(&patcher);
return true;
}
@ -449,7 +443,7 @@ void ArchiveHeapLoader::patch_native_pointers() {
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(cds, heap)("Patching native pointers in heap region");
BitMapView bm = r->ptrmap_view();
PatchNativePointers patcher((Metadata**)r->mapped_base());
PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
bm.iterate(&patcher);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,6 +62,7 @@ address ArchiveHeapWriter::_requested_top;
GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<int, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
@ -72,6 +73,7 @@ typedef ResourceHashtable<address, size_t,
AnyObj::C_HEAP,
mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;
void ArchiveHeapWriter::init() {
if (HeapShared::can_write()) {
@ -84,6 +86,7 @@ void ArchiveHeapWriter::init() {
_native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
_source_objs_order = new GrowableArrayCHeap<int, mtClassShared>(10000);
guarantee(UseG1GC, "implementation limitation");
guarantee(MIN_GC_REGION_ALIGNMENT <= /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
@ -91,6 +94,7 @@ void ArchiveHeapWriter::init() {
}
void ArchiveHeapWriter::add_source_obj(oop src_obj) {
_source_objs_order->append(_source_objs->length());
_source_objs->append(src_obj);
}
@ -226,9 +230,54 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShar
_buffer_used = new_used;
}
// Returns the ordering rank of object o when laying out the archived heap:
//   0 = contains no oop pointers and no native pointers
//   1 = contains native pointers only
//   2 = contains both oop and native pointers
//   3 = contains oop pointers only
static int oop_sorting_rank(oop o) {
  bool has_o_ptr = HeapShared::has_oop_pointers(o);
  bool has_n_ptr = HeapShared::has_native_pointers(o);
  if (has_o_ptr) {
    return has_n_ptr ? 2 : 3;
  }
  return has_n_ptr ? 1 : 0;
}
// The goal is to sort the objects in increasing order of:
// - objects that have no pointers
// - objects that have only native pointers
// - objects that have both native and oop pointers
// - objects that have only oop pointers
int ArchiveHeapWriter::compare_objs_by_oop_fields(int* a, int* b) {
oop oa = _source_objs->at(*a);
oop ob = _source_objs->at(*b);
int rank_a = oop_sorting_rank(oa);
int rank_b = oop_sorting_rank(ob);
if (rank_a != rank_b) {
return rank_a - rank_b;
} else {
// If they are the same rank, sort them by their position in the _source_objs array
return *a - *b;
}
}
// Sorts _source_objs_order (an array of indices into _source_objs) with
// compare_objs_by_oop_fields, grouping objects by the kinds of pointers
// they contain. _source_objs itself is not reordered.
void ArchiveHeapWriter::sort_source_objs() {
_source_objs_order->sort(compare_objs_by_oop_fields);
}
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
sort_source_objs();
for (int i = 0; i < _source_objs_order->length(); i++) {
int src_obj_index = _source_objs_order->at(i);
oop src_obj = _source_objs->at(src_obj_index);
HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
assert(info != nullptr, "must be");
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
@ -239,8 +288,8 @@ void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtCla
copy_roots_to_buffer(roots);
log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots",
_buffer_used, _source_objs->length() + 1, roots->length());
log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
_buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}
size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
@ -512,6 +561,17 @@ private:
}
};
// Logs which portion of 'bitmap' actually carries set bits: prints
// [start ... end) both as absolute bit positions and as percentages of
// total_bits (the number of bits that would cover the whole heap region).
// NOTE(review): divides by total_bits -- assumes total_bits > 0; confirm
// callers never pass a zero-sized buffer.
static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
// The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
size_t start = bitmap->find_first_set_bit(0); // presumably bitmap->size() if no bit is set -- per BitMap API, confirm
size_t end = bitmap->size();
log_info(cds)("%s = " SIZE_FORMAT_W(7) " ... " SIZE_FORMAT_W(7) " (%3zu%% ... %3zu%% = %3zu%%)", which,
start, end,
start * 100 / total_bits,
end * 100 / total_bits,
(end - start) * 100 / total_bits);
}
// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveHeapInfo* heap_info) {
@ -519,14 +579,17 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) {
oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset());
for (int i = 0; i < _source_objs_order->length(); i++) {
int src_obj_index = _source_objs_order->at(i);
oop src_obj = _source_objs->at(src_obj_index);
HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
assert(info != nullptr, "must be");
oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
address buffered_obj = offset_to_buffered_address<address>(info.buffer_offset());
address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
src_obj->oop_iterate(&relocator);
};
HeapShared::archived_object_cache()->iterate_all(iterator);
// Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
// doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
@ -542,6 +605,10 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
}
compute_ptrmap(heap_info);
size_t total_bytes = (size_t)_buffer->length();
log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}
void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
@ -551,6 +618,8 @@ void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
info._src_obj = src_obj;
info._field_offset = field_offset;
_native_pointers->append(info);
HeapShared::set_has_native_pointers(src_obj);
_num_native_ptrs ++;
}
}
@ -565,6 +634,13 @@ bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info,
assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**) _requested_top, "range check");
BitMap::idx_t idx = requested_field_addr - (Metadata**) _requested_bottom;
// Leading zeros have been removed so some addresses may not be in the ptrmap
size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
if (idx < start_pos) {
return false;
} else {
idx -= start_pos;
}
return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,6 +140,7 @@ private:
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static GrowableArrayCHeap<int, mtClassShared>* _source_objs_order;
typedef ResourceHashtable<size_t, oop,
36137, // prime number
@ -210,6 +211,10 @@ private:
template <typename T> static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap);
static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
static int compare_objs_by_oop_fields(int* a, int* b);
static void sort_source_objs();
public:
static void init() NOT_CDS_JAVA_HEAP_RETURN;
static void add_source_obj(oop src_obj);

View File

@ -289,6 +289,8 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- requested_base_address: " INTPTR_FORMAT, p2i(_requested_base_address));
st->print_cr("- mapped_base_address: " INTPTR_FORMAT, p2i(_mapped_base_address));
st->print_cr("- heap_roots_offset: " SIZE_FORMAT, _heap_roots_offset);
st->print_cr("- _heap_oopmap_start_pos: " SIZE_FORMAT, _heap_oopmap_start_pos);
st->print_cr("- _heap_ptrmap_start_pos: " SIZE_FORMAT, _heap_ptrmap_start_pos);
st->print_cr("- allow_archiving_with_java_agent:%d", _allow_archiving_with_java_agent);
st->print_cr("- use_optimized_module_handling: %d", _use_optimized_module_handling);
st->print_cr("- has_full_module_graph %d", _has_full_module_graph);
@ -1565,11 +1567,37 @@ static size_t write_bitmap(const CHeapBitMap* map, char* output, size_t offset)
return offset + size_in_bytes;
}
// The start of the archived heap has many primitive arrays (String
// bodies) that are not marked by the oop/ptr maps. So we usually have
// lots of leading zeros that can be stripped.
//
// Truncates 'map' in place so that its first bit is the first set bit of
// the original map, and returns the number of leading zero bits removed
// (the caller records this as the map's start position).
size_t FileMapInfo::remove_bitmap_leading_zeros(CHeapBitMap* map) {
  size_t old_zeros = map->find_first_set_bit(0);
  size_t old_size = map->size_in_bytes();

  // Slice off the leading zeros and resize the bitmap.
  map->truncate(old_zeros, map->size());

  DEBUG_ONLY(
    size_t new_zeros = map->find_first_set_bit(0);
    assert(new_zeros == 0, "Should have removed leading zeros");
  )
  // Use <=, not <: when the map has no leading zeros (old_zeros == 0) the
  // truncation is a no-op and the size stays exactly the same, which the
  // previous strict '<' assert would have rejected.
  assert(map->size_in_bytes() <= old_size, "Map size should not have increased");
  return old_zeros;
}
char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes) {
size_in_bytes = ptrmap->size_in_bytes();
if (heap_info->is_used()) {
// Remove leading zeros
size_t removed_oop_zeros = remove_bitmap_leading_zeros(heap_info->oopmap());
size_t removed_ptr_zeros = remove_bitmap_leading_zeros(heap_info->ptrmap());
header()->set_heap_oopmap_start_pos(removed_oop_zeros);
header()->set_heap_ptrmap_start_pos(removed_ptr_zeros);
size_in_bytes += heap_info->oopmap()->size_in_bytes();
size_in_bytes += heap_info->ptrmap()->size_in_bytes();
}

View File

@ -228,6 +228,8 @@ private:
size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap
size_t _heap_roots_offset; // Offset of the HeapShared::roots() object, from the bottom
// of the archived heap objects, in bytes.
size_t _heap_oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
size_t _heap_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
char* from_mapped_offset(size_t offset) const {
return mapped_base_address() + offset;
}
@ -269,6 +271,8 @@ public:
bool compressed_oops() const { return _compressed_oops; }
bool compressed_class_pointers() const { return _compressed_class_ptrs; }
size_t heap_roots_offset() const { return _heap_roots_offset; }
size_t heap_oopmap_start_pos() const { return _heap_oopmap_start_pos;}
size_t heap_ptrmap_start_pos() const { return _heap_ptrmap_start_pos;}
// FIXME: These should really return int
jshort max_used_path_index() const { return _max_used_path_index; }
jshort app_module_paths_start_index() const { return _app_module_paths_start_index; }
@ -281,6 +285,8 @@ public:
void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
void set_heap_oopmap_start_pos(size_t n) { _heap_oopmap_start_pos = n; }
void set_heap_ptrmap_start_pos(size_t n) { _heap_ptrmap_start_pos = n; }
void copy_base_archive_name(const char* name);
void set_shared_path_table(SharedPathTable table) {
@ -378,6 +384,8 @@ public:
uintx max_heap_size() const { return header()->max_heap_size(); }
size_t heap_roots_offset() const { return header()->heap_roots_offset(); }
size_t core_region_alignment() const { return header()->core_region_alignment(); }
size_t heap_oopmap_start_pos() const { return header()->heap_oopmap_start_pos(); }
size_t heap_ptrmap_start_pos() const { return header()->heap_ptrmap_start_pos(); }
CompressedOops::Mode narrow_oop_mode() const { return header()->narrow_oop_mode(); }
jshort app_module_paths_start_index() const { return header()->app_module_paths_start_index(); }
@ -434,6 +442,7 @@ public:
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
size_t remove_bitmap_leading_zeros(CHeapBitMap* map);
char* write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes);
size_t write_heap_region(ArchiveHeapInfo* heap_info);

View File

@ -284,7 +284,7 @@ bool HeapShared::archive_object(oop obj) {
// the identity_hash in the object header will have a predictable value,
// making the archive reproducible.
obj->identity_hash();
CachedOopInfo info = make_cached_oop_info();
CachedOopInfo info = make_cached_oop_info(obj);
archived_object_cache()->put(obj, info);
mark_native_pointers(obj);
@ -437,6 +437,24 @@ void HeapShared::mark_native_pointers(oop orig_obj) {
}
}
// Whether src_obj (which must already be in the archived object cache)
// was found to contain at least one non-null oop field.
bool HeapShared::has_oop_pointers(oop src_obj) {
  CachedOopInfo* cached = archived_object_cache()->get(src_obj);
  assert(cached != nullptr, "must be");
  return cached->has_oop_pointers();
}
// Whether src_obj (which must already be in the archived object cache)
// has been marked as containing at least one native pointer field.
bool HeapShared::has_native_pointers(oop src_obj) {
  CachedOopInfo* cached = archived_object_cache()->get(src_obj);
  assert(cached != nullptr, "must be");
  return cached->has_native_pointers();
}
// Marks src_obj (which must already be in the archived object cache) as
// containing at least one native pointer field.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* cached = archived_object_cache()->get(src_obj);
  assert(cached != nullptr, "must be");
  cached->set_has_native_pointers();
}
// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
// enum MyEnum {FOO, BAR}
@ -1138,10 +1156,27 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info() {
// Closure that checks whether an object has any non-null oop fields.
class PointsToOopsChecker : public BasicOopIterateClosure {
  bool _found; // set to true once any visited field holds a non-null oop
  template <class T> void check(T *p) {
    if (HeapAccess<>::oop_load(p) != nullptr) {
      _found = true;
    }
  }
public:
  PointsToOopsChecker() : _found(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  bool result() { return _found; }
};
// Builds the CachedOopInfo for obj: records the object that referenced it
// (non-null only while inside a WalkOopAndArchiveClosure walk) and whether
// obj itself contains any non-null oop fields.
//
// Fix: removed the stale pre-change line "return CachedOopInfo(referrer);"
// left interleaved by the diff rendering -- it made the lines below
// unreachable and no longer matches the two-argument constructor.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
  oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
// (1) If orig_obj has not been archived yet, archive it.
@ -1439,12 +1474,14 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
#ifndef PRODUCT
bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
const char* test_class_name = ArchiveHeapTestClass;
#else
bool is_test_class = false;
const char* test_class_name = ""; // avoid C++ printf checks warnings.
#endif
if (is_test_class) {
log_warning(cds)("Loading ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
}
Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
@ -1470,14 +1507,14 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
// We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
// core-lib classes. You need to at least append to the bootclasspath.
stringStream st;
st.print("ArchiveHeapTestClass %s is not in unnamed module", ArchiveHeapTestClass);
st.print("ArchiveHeapTestClass %s is not in unnamed module", test_class_name);
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
}
if (ik->package() != nullptr) {
// This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
stringStream st;
st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
st.print("ArchiveHeapTestClass %s is not in unnamed package", test_class_name);
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
}
} else {
@ -1492,7 +1529,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
}
if (is_test_class) {
log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", test_class_name);
}
ik->initialize(CHECK);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -186,18 +186,29 @@ public:
}
class CachedOopInfo {
// See "TEMP notes: What are these?" in archiveHeapWriter.hpp
// Used by CDSHeapVerifier.
oop _orig_referrer;
// The location of this object inside ArchiveHeapWriter::_buffer
size_t _buffer_offset;
// One or more fields in this object are pointing to non-null oops.
bool _has_oop_pointers;
// One or more fields in this object are pointing to MetaspaceObj
bool _has_native_pointers;
public:
CachedOopInfo(oop orig_referrer)
CachedOopInfo(oop orig_referrer, bool has_oop_pointers)
: _orig_referrer(orig_referrer),
_buffer_offset(0) {}
_buffer_offset(0),
_has_oop_pointers(has_oop_pointers),
_has_native_pointers(false) {}
oop orig_referrer() const { return _orig_referrer; }
void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
size_t buffer_offset() const { return _buffer_offset; }
bool has_oop_pointers() const { return _has_oop_pointers; }
bool has_native_pointers() const { return _has_native_pointers; }
void set_has_native_pointers() { _has_native_pointers = true; }
};
private:
@ -237,7 +248,7 @@ private:
static DumpTimeKlassSubGraphInfoTable* _dump_time_subgraph_info_table;
static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table;
static CachedOopInfo make_cached_oop_info();
static CachedOopInfo make_cached_oop_info(oop obj);
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_full_module_graph);
@ -368,6 +379,9 @@ private:
// Scratch objects for archiving Klass::java_mirror()
static void set_scratch_java_mirror(Klass* k, oop mirror);
static void remove_scratch_objects(Klass* k);
static bool has_oop_pointers(oop obj);
static bool has_native_pointers(oop obj);
static void set_has_native_pointers(oop obj);
// We use the HeapShared::roots() array to make sure that objects stored in the
// archived heap region are not prematurely collected. These roots include:

View File

@ -96,6 +96,7 @@ bool MetaspaceShared::_remapped_readwrite = false;
void* MetaspaceShared::_shared_metaspace_static_top = nullptr;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
// The CDS archive is divided into the following regions:
// rw - read-write metadata

View File

@ -52,6 +52,8 @@ class MetaspaceShared : AllStatic {
static void* _shared_metaspace_static_top;
static intx _relocation_delta;
static char* _requested_base_address;
static bool _use_optimized_module_handling;
public:
enum {
// core archive spaces
@ -158,6 +160,10 @@ public:
return is_windows;
}
// Can we skip some expensive operations related to modules?
static bool use_optimized_module_handling() { return NOT_CDS(false) CDS_ONLY(_use_optimized_module_handling); }
static void disable_optimized_module_handling() { _use_optimized_module_handling = false; }
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static FileMapInfo* open_static_archive();