8270489: Support archived heap objects in EpsilonGC
Reviewed-by: shade, ccheung

commit 655ea6d42a, parent dacd197897
@@ -1117,12 +1117,12 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
                                                    closed_heap_regions,
                                                    closed_heap_oopmaps,
                                                    MetaspaceShared::first_closed_heap_region,
-                                                   MetaspaceShared::max_closed_heap_region);
+                                                   MetaspaceShared::max_num_closed_heap_regions);
   _total_open_heap_region_size = mapinfo->write_heap_regions(
                                                    open_heap_regions,
                                                    open_heap_oopmaps,
                                                    MetaspaceShared::first_open_heap_region,
-                                                   MetaspaceShared::max_open_heap_region);
+                                                   MetaspaceShared::max_num_open_heap_regions);
   }
 
   print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
@@ -262,8 +262,7 @@ void WriteClosure::do_oop(oop* o) {
   if (*o == NULL) {
     _dump_region->append_intptr_t(0);
   } else {
-    assert(HeapShared::is_heap_object_archiving_allowed(),
-           "Archiving heap object is not allowed");
+    assert(HeapShared::can_write(), "sanity");
     _dump_region->append_intptr_t(
       (intptr_t)CompressedOops::encode_not_null(*o));
   }
@@ -308,13 +307,11 @@ void ReadClosure::do_tag(int tag) {
 
 void ReadClosure::do_oop(oop *p) {
   narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
-  if (CompressedOops::is_null(o) || !HeapShared::open_regions_mapped()) {
+  if (CompressedOops::is_null(o) || !HeapShared::is_fully_available()) {
     *p = NULL;
   } else {
-    assert(HeapShared::is_heap_object_archiving_allowed(),
-           "Archived heap object is not allowed");
-    assert(HeapShared::open_regions_mapped(),
-           "Open archive heap region is not mapped");
+    assert(HeapShared::can_use(), "sanity");
+    assert(HeapShared::is_fully_available(), "must be");
     *p = HeapShared::decode_from_archive(o);
   }
 }
@@ -212,7 +212,7 @@ void FileMapHeader::populate(FileMapInfo* mapinfo, size_t core_region_alignment)
   _core_region_alignment = core_region_alignment;
   _obj_alignment = ObjectAlignmentInBytes;
   _compact_strings = CompactStrings;
-  if (HeapShared::is_heap_object_archiving_allowed()) {
+  if (DumpSharedSpaces && HeapShared::can_write()) {
     _narrow_oop_mode = CompressedOops::mode();
     _narrow_oop_base = CompressedOops::base();
     _narrow_oop_shift = CompressedOops::shift();
@@ -1598,7 +1598,6 @@ MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char*
 }
 
 bool FileMapInfo::read_region(int i, char* base, size_t size) {
-  assert(MetaspaceShared::use_windows_memory_mapping(), "used by windows only");
   FileMapRegion* si = space_at(i);
   log_info(cds)("Commit %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)%s",
                 is_static() ? "static " : "dynamic", i, p2i(base), p2i(base + size),
@@ -1612,6 +1611,11 @@ bool FileMapInfo::read_region(int i, char* base, size_t size) {
       read_bytes(base, size) != size) {
     return false;
   }
+
+  if (VerifySharedSpaces && !region_crc_check(base, si->used(), si->crc())) {
+    return false;
+  }
+
   return true;
 }
@@ -1804,27 +1808,28 @@ MemRegion FileMapInfo::get_heap_regions_range_with_current_oop_encoding_mode() {
   return MemRegion((HeapWord*)start, (HeapWord*)end);
 }
 
-//
-// Map the closed and open archive heap objects to the runtime java heap.
-//
-// The shared objects are mapped at (or close to) the java heap top in
-// closed archive regions. The mapped objects contain no out-going
-// references to any other java heap regions. GC does not write into the
-// mapped closed archive heap region.
-//
-// The open archive heap objects are mapped below the shared objects in
-// the runtime java heap. The mapped open archive heap data only contains
-// references to the shared objects and open archive objects initially.
-// During runtime execution, out-going references to any other java heap
-// regions may be added. GC may mark and update references in the mapped
-// open archive objects.
-void FileMapInfo::map_heap_regions_impl() {
-  if (!HeapShared::is_heap_object_archiving_allowed()) {
-    log_info(cds)("CDS heap data is being ignored. UseG1GC, "
-                  "UseCompressedOops and UseCompressedClassPointers are required.");
-    return;
+void FileMapInfo::map_or_load_heap_regions() {
+  bool success = false;
+
+  if (can_use_heap_regions()) {
+    if (HeapShared::can_map()) {
+      success = map_heap_regions();
+    } else if (HeapShared::can_load()) {
+      success = HeapShared::load_heap_regions(this);
+    } else {
+      log_info(cds)("Cannot use CDS heap data. UseG1GC or UseEpsilonGC are required.");
+    }
+  }
+
+  if (!success) {
+    MetaspaceShared::disable_full_module_graph();
+  }
+}
+
+bool FileMapInfo::can_use_heap_regions() {
+  if (!has_heap_regions()) {
+    return false;
   }
   if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) {
     ShouldNotReachHere(); // CDS should have been disabled.
     // The archived objects are mapped at JVM start-up, but we don't know if
@@ -1859,9 +1864,27 @@ void FileMapInfo::map_heap_regions_impl() {
   if (narrow_klass_base() != CompressedKlassPointers::base() ||
       narrow_klass_shift() != CompressedKlassPointers::shift()) {
     log_info(cds)("CDS heap data cannot be used because the archive was created with an incompatible narrow klass encoding mode.");
-    return;
+    return false;
   }
+  return true;
+}
+
+//
+// Map the closed and open archive heap objects to the runtime java heap.
+//
+// The shared objects are mapped at (or close to) the java heap top in
+// closed archive regions. The mapped objects contain no out-going
+// references to any other java heap regions. GC does not write into the
+// mapped closed archive heap region.
+//
+// The open archive heap objects are mapped below the shared objects in
+// the runtime java heap. The mapped open archive heap data only contains
+// references to the shared objects and open archive objects initially.
+// During runtime execution, out-going references to any other java heap
+// regions may be added. GC may mark and update references in the mapped
+// open archive objects.
+void FileMapInfo::map_heap_regions_impl() {
   if (narrow_oop_mode() != CompressedOops::mode() ||
       narrow_oop_base() != CompressedOops::base() ||
       narrow_oop_shift() != CompressedOops::shift()) {
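The map-or-load fallback above is the crux of the change: direct mapping is tried first, start-up copying second, and anything else degrades to running without archived heap objects. As a stand-alone C++ sketch of that control flow (the stubbed predicates are illustrative placeholders, not JDK code):

#include <cstdio>

// Stand-in capability checks; the real ones are HeapShared::can_map()/can_load().
static bool can_map_regions()  { return false; }  // e.g. GC is not G1
static bool can_load_regions() { return true;  }  // e.g. EpsilonGC supports loading
static bool has_heap_regions() { return true;  }

static bool map_or_load() {
  bool success = false;
  if (has_heap_regions()) {            // can_use_heap_regions() in the patch
    if (can_map_regions()) {
      success = true;                  // G1: map the archived regions directly
    } else if (can_load_regions()) {
      success = true;                  // Epsilon: copy the objects at start-up
    } else {
      std::puts("Cannot use CDS heap data.");
    }
  }
  // On any failure the archived full module graph is abandoned,
  // because it depends on archived heap objects.
  return success;
}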
@@ -1920,14 +1943,14 @@ void FileMapInfo::map_heap_regions_impl() {
 
   // Map the closed heap regions: GC does not write into these regions.
   if (map_heap_regions(MetaspaceShared::first_closed_heap_region,
-                       MetaspaceShared::max_closed_heap_region,
+                       MetaspaceShared::max_num_closed_heap_regions,
                        /*is_open_archive=*/ false,
                        &closed_heap_regions, &num_closed_heap_regions)) {
     HeapShared::set_closed_regions_mapped();
 
     // Now, map the open heap regions: GC can write into these regions.
     if (map_heap_regions(MetaspaceShared::first_open_heap_region,
-                         MetaspaceShared::max_open_heap_region,
+                         MetaspaceShared::max_num_open_heap_regions,
                          /*is_open_archive=*/ true,
                          &open_heap_regions, &num_open_heap_regions)) {
       HeapShared::set_open_regions_mapped();
@@ -1936,10 +1959,8 @@ void FileMapInfo::map_heap_regions_impl() {
   }
 }
 
-void FileMapInfo::map_heap_regions() {
-  if (has_heap_regions()) {
-    map_heap_regions_impl();
-  }
+bool FileMapInfo::map_heap_regions() {
+  map_heap_regions_impl();
 
   if (!HeapShared::closed_regions_mapped()) {
     assert(closed_heap_regions == NULL &&
@@ -1948,7 +1969,9 @@ void FileMapInfo::map_heap_regions() {
 
   if (!HeapShared::open_regions_mapped()) {
     assert(open_heap_regions == NULL && num_open_heap_regions == 0, "sanity");
-    MetaspaceShared::disable_full_module_graph();
+    return false;
+  } else {
+    return true;
   }
 }
@@ -2353,7 +2376,7 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
   FileMapInfo *map_info = FileMapInfo::current_info();
   if (map_info) {
     map_info->fail_continue("%s", msg);
-    for (int i = 0; i < MetaspaceShared::num_non_heap_spaces; i++) {
+    for (int i = 0; i < MetaspaceShared::num_non_heap_regions; i++) {
       if (!HeapShared::is_heap_region(i)) {
         map_info->unmap_region(i);
       }
@@ -137,6 +137,7 @@ public:
 
 
 class FileMapRegion: private CDSFileMapRegion {
+public:
   void assert_is_heap_region() const {
     assert(_is_heap_region, "must be heap region");
   }
@@ -144,7 +145,6 @@ class FileMapRegion: private CDSFileMapRegion {
     assert(!_is_heap_region, "must not be heap region");
   }
 
-public:
   static FileMapRegion* cast(CDSFileMapRegion* p) {
     return (FileMapRegion*)p;
   }
@@ -421,6 +421,8 @@ public:
   void set_requested_base(char* b) { header()->set_requested_base(b); }
   char* requested_base_address() const { return header()->requested_base_address(); }
 
+  narrowOop heap_obj_roots() const { return header()->heap_obj_roots(); }
+
   class DynamicArchiveHeader* dynamic_header() const {
     assert(!is_static(), "must be");
     return (DynamicArchiveHeader*)header();
@@ -468,13 +470,15 @@ public:
   size_t read_bytes(void* buffer, size_t count);
   MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs);
   void unmap_regions(int regions[], int num_regions);
-  void map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
+  void map_or_load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void patch_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN;
   void patch_heap_embedded_pointers(MemRegion* regions, int num_regions,
                                     int first_region_idx) NOT_CDS_JAVA_HEAP_RETURN;
   bool has_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
   MemRegion get_heap_regions_range_with_current_oop_encoding_mode() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion());
+  bool read_region(int i, char* base, size_t size);
+  char* map_bitmap_region();
   void unmap_region(int i);
   bool verify_region_checksum(int i);
   void close();
@@ -574,25 +578,28 @@ public:
                         MemRegion** regions_ret, int* num_regions_ret) NOT_CDS_JAVA_HEAP_RETURN_(false);
   bool region_crc_check(char* buf, size_t size, int expected_crc) NOT_CDS_RETURN_(false);
   void dealloc_heap_regions(MemRegion* regions, int num) NOT_CDS_JAVA_HEAP_RETURN;
+  bool can_use_heap_regions();
+  bool load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
+  bool map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
   void map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN;
-  char* map_bitmap_region();
   MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs);
-  bool read_region(int i, char* base, size_t size);
   bool relocate_pointers_in_core_regions(intx addr_delta);
   static size_t set_oopmaps_offset(GrowableArray<ArchiveHeapOopmapInfo> *oopmaps, size_t curr_size);
   static size_t write_oopmaps(GrowableArray<ArchiveHeapOopmapInfo> *oopmaps, size_t curr_offset, char* buffer);
 
+  address decode_start_address(FileMapRegion* spc, bool with_current_oop_encoding_mode);
+
   // The starting address of spc, as calculated with CompressedOop::decode_non_null()
   address start_address_as_decoded_with_current_oop_encoding_mode(FileMapRegion* spc) {
     return decode_start_address(spc, true);
   }
 
+ public:
   // The starting address of spc, as calculated with HeapShared::decode_from_archive()
   address start_address_as_decoded_from_archive(FileMapRegion* spc) {
     return decode_start_address(spc, false);
   }
 
-  address decode_start_address(FileMapRegion* spc, bool with_current_oop_encoding_mode);
  private:
 
 #if INCLUDE_JVMTI
   static ClassPathEntry** _classpath_entries_for_jvmti;
@@ -38,6 +38,7 @@
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 #include "logging/log.hpp"
@@ -69,10 +70,23 @@
 
 bool HeapShared::_closed_regions_mapped = false;
 bool HeapShared::_open_regions_mapped = false;
+bool HeapShared::_is_loaded = false;
 address HeapShared::_narrow_oop_base;
 int HeapShared::_narrow_oop_shift;
 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
 
+uintptr_t HeapShared::_loaded_heap_bottom = 0;
+uintptr_t HeapShared::_loaded_heap_top = 0;
+uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX;
+uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX;
+uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX;
+uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX;
+uintptr_t HeapShared::_dumptime_top = 0;
+intx HeapShared::_runtime_offset_0 = 0;
+intx HeapShared::_runtime_offset_1 = 0;
+intx HeapShared::_runtime_offset_2 = 0;
+intx HeapShared::_runtime_offset_3 = 0;
+bool HeapShared::_loading_failed = false;
 //
 // If you add new entries to the following tables, you should know what you're doing!
 //
@@ -118,7 +132,7 @@ OopHandle HeapShared::_roots;
 
 #ifdef ASSERT
 bool HeapShared::is_archived_object_during_dumptime(oop p) {
-  assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
+  assert(HeapShared::can_write(), "must be");
   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
   return Universe::heap()->is_archived_object(p);
 }
@@ -129,10 +143,14 @@ bool HeapShared::is_archived_object_during_dumptime(oop p) {
 // Java heap object archiving support
 //
 ////////////////////////////////////////////////////////////////
-void HeapShared::fixup_mapped_regions() {
-  FileMapInfo *mapinfo = FileMapInfo::current_info();
-  mapinfo->fixup_mapped_heap_regions();
+void HeapShared::fixup_regions() {
+  FileMapInfo* mapinfo = FileMapInfo::current_info();
+  if (is_mapped()) {
+    mapinfo->fixup_mapped_heap_regions();
+  } else if (_loading_failed) {
+    fill_failed_loaded_region();
+  }
+  if (is_fully_available()) {
     _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
     if (!MetaspaceShared::use_full_module_graph()) {
       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
|
||||
objArrayOop HeapShared::roots() {
|
||||
if (DumpSharedSpaces) {
|
||||
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
|
||||
if (!is_heap_object_archiving_allowed()) {
|
||||
if (!HeapShared::can_write()) {
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
@ -219,7 +237,7 @@ objArrayOop HeapShared::roots() {
|
||||
|
||||
void HeapShared::set_roots(narrowOop roots) {
|
||||
assert(UseSharedSpaces, "runtime only");
|
||||
assert(open_regions_mapped(), "must be");
|
||||
assert(is_fully_available(), "must be");
|
||||
_roots_narrow = roots;
|
||||
}
|
||||
|
||||
@ -244,7 +262,7 @@ oop HeapShared::get_root(int index, bool clear) {
|
||||
void HeapShared::clear_root(int index) {
|
||||
assert(index >= 0, "sanity");
|
||||
assert(UseSharedSpaces, "must be");
|
||||
if (open_regions_mapped()) {
|
||||
if (is_fully_available()) {
|
||||
if (log_is_enabled(Debug, cds, heap)) {
|
||||
oop old = roots()->obj_at(index);
|
||||
log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
|
||||
@ -321,7 +339,7 @@ void HeapShared::archive_klass_objects() {
|
||||
}
|
||||
|
||||
void HeapShared::run_full_gc_in_vm_thread() {
|
||||
if (is_heap_object_archiving_allowed()) {
|
||||
if (HeapShared::can_write()) {
|
||||
// Avoid fragmentation while archiving heap objects.
|
||||
// We do this inside a safepoint, so that no further allocation can happen after GC
|
||||
// has finished.
|
||||
@ -365,7 +383,7 @@ void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
}
|
||||
|
||||
void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
|
||||
assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range();
|
||||
|
||||
@ -382,7 +400,7 @@ void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
|
||||
}
|
||||
|
||||
void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
|
||||
assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
|
||||
|
||||
@ -684,7 +702,7 @@ static void verify_the_heap(Klass* k, const char* which) {
|
||||
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
|
||||
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
|
||||
void HeapShared::resolve_classes(JavaThread* THREAD) {
|
||||
if (!is_mapped()) {
|
||||
if (!is_fully_available()) {
|
||||
return; // nothing to do
|
||||
}
|
||||
resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
|
||||
@ -722,7 +740,7 @@ void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
|
||||
}
|
||||
|
||||
void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
|
||||
if (!is_mapped()) {
|
||||
if (!is_fully_available()) {
|
||||
return; // nothing to do
|
||||
}
|
||||
|
||||
@ -1277,7 +1295,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
|
||||
}
|
||||
|
||||
void HeapShared::init_subgraph_entry_fields(TRAPS) {
|
||||
assert(is_heap_object_archiving_allowed(), "Sanity check");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
_dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
|
||||
init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
|
||||
num_closed_archive_subgraph_entry_fields,
|
||||
@ -1293,7 +1311,7 @@ void HeapShared::init_subgraph_entry_fields(TRAPS) {
|
||||
}
|
||||
|
||||
void HeapShared::init_for_dumping(TRAPS) {
|
||||
if (is_heap_object_archiving_allowed()) {
|
||||
if (HeapShared::can_write()) {
|
||||
_dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
|
||||
init_subgraph_entry_fields(CHECK);
|
||||
}
|
||||
@ -1453,4 +1471,270 @@ void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
|
||||
bm.iterate(&patcher);
|
||||
}
|
||||
|
||||
// The CDS archive remembers each heap object by its address at dump time, but
|
||||
// the heap object may be loaded at a different address at run time. This structure is used
|
||||
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
|
||||
// to their runtime addresses.
|
||||
struct LoadedArchiveHeapRegion {
|
||||
int _region_index; // index for FileMapInfo::space_at(index)
|
||||
size_t _region_size; // number of bytes in this region
|
||||
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
|
||||
intx _runtime_offset; // If an object's dump time address P is within in this region, its
|
||||
// runtime address is P + _runtime_offset
|
||||
|
||||
static int comparator(const void* a, const void* b) {
|
||||
LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
|
||||
LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
|
||||
if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
|
||||
return -1;
|
||||
} else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
uintptr_t top() {
|
||||
return _dumptime_base + _region_size;
|
||||
}
|
||||
};
|
||||
|
||||
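Since _runtime_offset is just the difference between the load address and the dump-time base, translating an address is a range lookup plus an add. A self-contained worked example with made-up addresses (not JDK code):

#include <cassert>
#include <cstdint>

struct Region { uintptr_t dumptime_base; size_t size; intptr_t runtime_offset; };

// Regions are sorted by dumptime_base; pick the highest base <= addr.
uintptr_t relocate(uintptr_t dump_addr, const Region* r, int n) {
  for (int i = n - 1; i >= 0; i--) {
    if (dump_addr >= r[i].dumptime_base) return dump_addr + r[i].runtime_offset;
  }
  assert(false);
  return 0;
}

int main() {
  // Two dump-time regions, loaded back to back at runtime address 0x700000:
  Region r[2] = {
    { 0x100000, 0x8000, 0x700000 - 0x100000 },   // runtime_offset = 0x600000
    { 0x200000, 0x4000, 0x708000 - 0x200000 },   // runtime_offset = 0x508000
  };
  assert(relocate(0x200010, r, 2) == 0x708010);  // object in region 1
  assert(relocate(0x100008, r, 2) == 0x700008);  // object in region 0
  return 0;
}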
+
+void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
+                                             int num_loaded_regions) {
+  _dumptime_base_0 = loaded_regions[0]._dumptime_base;
+  _dumptime_base_1 = loaded_regions[1]._dumptime_base;
+  _dumptime_base_2 = loaded_regions[2]._dumptime_base;
+  _dumptime_base_3 = loaded_regions[3]._dumptime_base;
+  _dumptime_top = loaded_regions[num_loaded_regions-1].top();
+
+  _runtime_offset_0 = loaded_regions[0]._runtime_offset;
+  _runtime_offset_1 = loaded_regions[1]._runtime_offset;
+  _runtime_offset_2 = loaded_regions[2]._runtime_offset;
+  _runtime_offset_3 = loaded_regions[3]._runtime_offset;
+
+  assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
+  if (num_loaded_regions < 4) {
+    _dumptime_base_3 = UINTPTR_MAX;
+  }
+  if (num_loaded_regions < 3) {
+    _dumptime_base_2 = UINTPTR_MAX;
+  }
+}
+
+bool HeapShared::can_load() {
+  return Universe::heap()->can_load_archived_objects();
+}
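A note on the UINTPTR_MAX defaults above: the decode fast path (heapShared.inline.hpp, later in this change) tests `p >= _dumptime_base_i` from the highest slot downwards, and no plausible decoded heap address compares greater-or-equal to UINTPTR_MAX, so slots for regions that were never loaded simply never match. Minimal check (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t dumptime_base_3 = UINTPTR_MAX;  // slot for a region that was not loaded
  uintptr_t p = 0x7f0012345678;             // any plausible decoded address
  assert(p < dumptime_base_3);              // the absent region is never selected
  return 0;
}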
+
+template <int NUM_LOADED_REGIONS>
+class PatchLoadedRegionPointers: public BitMapClosure {
+  narrowOop* _start;
+  intx _offset_0;
+  intx _offset_1;
+  intx _offset_2;
+  intx _offset_3;
+  uintptr_t _base_0;
+  uintptr_t _base_1;
+  uintptr_t _base_2;
+  uintptr_t _base_3;
+  uintptr_t _top;
+
+  static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
+  static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
+  static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
+
+ public:
+  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
+    : _start(start),
+      _offset_0(loaded_regions[0]._runtime_offset),
+      _offset_1(loaded_regions[1]._runtime_offset),
+      _offset_2(loaded_regions[2]._runtime_offset),
+      _offset_3(loaded_regions[3]._runtime_offset),
+      _base_0(loaded_regions[0]._dumptime_base),
+      _base_1(loaded_regions[1]._dumptime_base),
+      _base_2(loaded_regions[2]._dumptime_base),
+      _base_3(loaded_regions[3]._dumptime_base) {
+    _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
+  }
+
+  bool do_bit(size_t offset) {
+    narrowOop* p = _start + offset;
+    narrowOop v = *p;
+    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
+    uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
+    assert(_base_0 <= o && o < _top, "must be");
+
+    // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
+    if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
+      o += _offset_3;
+    } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
+      o += _offset_2;
+    } else if (o >= _base_1) {
+      o += _offset_1;
+    } else {
+      o += _offset_0;
+    }
+    HeapShared::assert_in_loaded_heap(o);
+    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
+    return true;
+  }
+};
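The class is templatized purely so the region-count comparisons become compile-time constants; for the common two-region archive only one range test survives. A simplified stand-alone version of the same dispatch idea (not JDK code):

#include <cstdint>

template <int NUM_LOADED_REGIONS>
uintptr_t patch_one(uintptr_t o, const uintptr_t* base, const intptr_t* off) {
  // For NUM_LOADED_REGIONS == 2 the first two branches are dead code and
  // the compiler removes them entirely.
  if (NUM_LOADED_REGIONS > 3 && o >= base[3]) return o + off[3];
  if (NUM_LOADED_REGIONS > 2 && o >= base[2]) return o + off[2];
  if (o >= base[1]) return o + off[1];
  return o + off[0];
}

// Explicit instantiations mirroring the dispatch in load_regions() below:
template uintptr_t patch_one<2>(uintptr_t, const uintptr_t*, const intptr_t*);
template uintptr_t patch_one<3>(uintptr_t, const uintptr_t*, const intptr_t*);
template uintptr_t patch_one<4>(uintptr_t, const uintptr_t*, const intptr_t*);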
+
+int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
+                                    uintptr_t* buffer_ret) {
+  size_t total_bytes = 0;
+  int num_loaded_regions = 0;
+  for (int i = MetaspaceShared::first_archive_heap_region;
+       i <= MetaspaceShared::last_archive_heap_region; i++) {
+    FileMapRegion* r = mapinfo->space_at(i);
+    r->assert_is_heap_region();
+    if (r->used() > 0) {
+      assert(is_aligned(r->used(), HeapWordSize), "must be");
+      total_bytes += r->used();
+      LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
+      ri->_region_index = i;
+      ri->_region_size = r->used();
+      ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
+    }
+  }
+
+  assert(is_aligned(total_bytes, HeapWordSize), "must be");
+  uintptr_t buffer = (uintptr_t)
+    Universe::heap()->allocate_loaded_archive_space(total_bytes / HeapWordSize);
+  _loaded_heap_bottom = buffer;
+  _loaded_heap_top = buffer + total_bytes;
+
+  *buffer_ret = buffer;
+  return num_loaded_regions;
+}
+
+void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
+                                     uintptr_t buffer) {
+  // Find the relocation offset of the pointers in each region
+  qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
+        LoadedArchiveHeapRegion::comparator);
+
+  uintptr_t p = buffer;
+  for (int i = 0; i < num_loaded_regions; i++) {
+    // This region will be loaded at p, so all objects inside this
+    // region will be shifted by ri->offset
+    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
+    ri->_runtime_offset = p - ri->_dumptime_base;
+    p += ri->_region_size;
+  }
+  assert(p == _loaded_heap_top, "must be");
+}
+
+bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
+                              int num_loaded_regions, uintptr_t buffer) {
+  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
+  uintptr_t load_address = buffer;
+  for (int i = 0; i < num_loaded_regions; i++) {
+    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
+    FileMapRegion* r = mapinfo->space_at(ri->_region_index);
+
+    if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used())) {
+      // There's no easy way to free the buffer, so we will fill it with zero later
+      // in fill_failed_loaded_region(), and it will eventually be GC'ed.
+      log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
+      _loading_failed = true;
+      return false;
+    }
+    log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " size = " SIZE_FORMAT_W(8) " bytes, delta = " INTX_FORMAT,
+                  ri->_region_index, load_address, ri->_region_size, ri->_runtime_offset);
+
+    uintptr_t oopmap = bitmap_base + r->oopmap_offset();
+    BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
+
+    if (num_loaded_regions == 4) {
+      PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
+      bm.iterate(&patcher);
+    } else if (num_loaded_regions == 3) {
+      PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
+      bm.iterate(&patcher);
+    } else {
+      assert(num_loaded_regions == 2, "must be");
+      PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
+      bm.iterate(&patcher);
+    }
+
+    load_address += r->used();
+  }
+
+  return true;
+}
+
+bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
+  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
+
+  LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
+  memset(loaded_regions, 0, sizeof(loaded_regions));
+
+  uintptr_t buffer;
+  int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, &buffer);
+  sort_loaded_regions(loaded_regions, num_loaded_regions, buffer);
+  if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, buffer)) {
+    return false;
+  }
+
+  init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
+  _is_loaded = true;
+  set_roots(mapinfo->heap_obj_roots());
+
+  return true;
+}
+
+class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
+  ResourceHashtable<uintptr_t, bool>* _table;
+
+ public:
+  VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
+
+  virtual void do_oop(narrowOop* p) {
+    // This should be called before the loaded regions are modified, so all the embedded pointers
+    // must be NULL, or must point to a valid object in the loaded regions.
+    narrowOop v = *p;
+    if (!CompressedOops::is_null(v)) {
+      oop o = CompressedOops::decode_not_null(v);
+      uintptr_t u = cast_from_oop<uintptr_t>(o);
+      HeapShared::assert_in_loaded_heap(u);
+      guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
+    }
+  }
+  virtual void do_oop(oop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+void HeapShared::verify_loaded_heap() {
+  if (!VerifyArchivedFields || !is_loaded()) {
+    return;
+  }
+
+  ResourceMark rm;
+  ResourceHashtable<uintptr_t, bool> table;
+  VerifyLoadedHeapEmbeddedPointers verifier(&table);
+  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
+  HeapWord* top = (HeapWord*)_loaded_heap_top;
+
+  for (HeapWord* p = bottom; p < top; ) {
+    oop o = cast_to_oop(p);
+    table.put(cast_from_oop<uintptr_t>(o), true);
+    p += o->size();
+  }
+
+  for (HeapWord* p = bottom; p < top; ) {
+    oop o = cast_to_oop(p);
+    o->oop_iterate(&verifier);
+    p += o->size();
+  }
+}
+
+void HeapShared::fill_failed_loaded_region() {
+  assert(_loading_failed, "must be");
+  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
+  HeapWord* top = (HeapWord*)_loaded_heap_top;
+  Universe::heap()->fill_with_objects(bottom, top - bottom);
+}
+
 #endif // INCLUDE_CDS_JAVA_HEAP
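verify_loaded_heap is a two-pass walk: first record every object start in the loaded range, then check that every embedded reference points at a recorded start. The same shape in miniature (toy types, not JDK code):

#include <cassert>
#include <cstddef>
#include <set>

struct Obj { size_t size; const Obj* ref; };  // one optional outgoing reference

void verify(const Obj* objs, size_t n) {
  std::set<const Obj*> starts;
  for (size_t i = 0; i < n; i++) starts.insert(&objs[i]);            // pass 1
  for (size_t i = 0; i < n; i++) {                                   // pass 2
    if (objs[i].ref != NULL) assert(starts.count(objs[i].ref) == 1);
  }
}

int main() {
  Obj objs[2] = { { 16, NULL }, { 24, &objs[0] } };  // second object points at first
  verify(objs, 2);
  return 0;
}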
@@ -42,6 +42,7 @@
 
 #if INCLUDE_CDS_JAVA_HEAP
 class DumpedInternedStrings;
+class FileMapInfo;
 
 struct ArchivableStaticFieldInfo {
   const char* klass_name;
|
||||
};
|
||||
#endif // INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
struct LoadedArchiveHeapRegion;
|
||||
|
||||
class HeapShared: AllStatic {
|
||||
friend class VerifySharedOopClosure;
|
||||
private:
|
||||
|
||||
public:
|
||||
// At runtime, heap regions in the CDS archive can be used in two different ways,
|
||||
// depending on the GC type:
|
||||
// - Mapped: (G1 only) the regions are directly mapped into the Java heap
|
||||
// - Loaded: At VM start-up, the objects in the heap regions are copied into the
|
||||
// Java heap. This is easier to implement than mapping but
|
||||
// slightly less efficient, as the embedded pointers need to be relocated.
|
||||
static bool can_use() { return can_map() || can_load(); }
|
||||
|
||||
// Can this VM write heap regions into the CDS archive? Currently only G1+compressed{oops,cp}
|
||||
static bool can_write() {
|
||||
assert(DumpSharedSpaces, "only when writing static archive");
|
||||
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);)
|
||||
NOT_CDS_JAVA_HEAP(return false;)
|
||||
}
|
||||
|
||||
// Can this VM map archived heap regions? Currently only G1+compressed{oops,cp}
|
||||
static bool can_map() {
|
||||
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);)
|
||||
NOT_CDS_JAVA_HEAP(return false;)
|
||||
}
|
||||
static bool is_mapped() {
|
||||
return closed_regions_mapped() && open_regions_mapped();
|
||||
}
|
||||
|
||||
// Can this VM load the objects from archived heap regions into the heap at start-up?
|
||||
static bool can_load() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
static void verify_loaded_heap() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static bool is_loaded() {
|
||||
CDS_JAVA_HEAP_ONLY(return _is_loaded;)
|
||||
NOT_CDS_JAVA_HEAP(return false;)
|
||||
}
|
||||
|
||||
static bool are_archived_strings_available() {
|
||||
return is_loaded() || closed_regions_mapped();
|
||||
}
|
||||
static bool are_archived_mirrors_available() {
|
||||
return is_fully_available();
|
||||
}
|
||||
static bool is_fully_available() {
|
||||
return is_loaded() || is_mapped();
|
||||
}
|
||||
|
||||
private:
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
static bool _closed_regions_mapped;
|
||||
static bool _open_regions_mapped;
|
||||
static bool _is_loaded;
|
||||
static DumpedInternedStrings *_dumped_interned_strings;
|
||||
|
||||
// Support for loaded archived heap. These are cached values from
|
||||
// LoadedArchiveHeapRegion's.
|
||||
static uintptr_t _dumptime_base_0;
|
||||
static uintptr_t _dumptime_base_1;
|
||||
static uintptr_t _dumptime_base_2;
|
||||
static uintptr_t _dumptime_base_3;
|
||||
static uintptr_t _dumptime_top;
|
||||
static intx _runtime_offset_0;
|
||||
static intx _runtime_offset_1;
|
||||
static intx _runtime_offset_2;
|
||||
static intx _runtime_offset_3;
|
||||
static uintptr_t _loaded_heap_bottom;
|
||||
static uintptr_t _loaded_heap_top;
|
||||
static bool _loading_failed;
|
||||
|
||||
public:
|
||||
static unsigned oop_hash(oop const& p);
|
||||
static unsigned string_oop_hash(oop const& string) {
|
||||
return java_lang_String::hash_code(string);
|
||||
}
|
||||
|
||||
static bool load_heap_regions(FileMapInfo* mapinfo);
|
||||
static void assert_in_loaded_heap(uintptr_t o) {
|
||||
assert(is_in_loaded_heap(o), "must be");
|
||||
}
|
||||
|
||||
private:
|
||||
static bool is_in_loaded_heap(uintptr_t o) {
|
||||
return (_loaded_heap_bottom <= o && o < _loaded_heap_top);
|
||||
}
|
||||
|
||||
typedef ResourceHashtable<oop, oop,
|
||||
15889, // prime number
|
||||
ResourceObj::C_HEAP,
|
||||
@ -274,6 +345,16 @@ private:
|
||||
resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS);
|
||||
static void resolve_or_init(Klass* k, bool do_init, TRAPS);
|
||||
static void init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record);
|
||||
|
||||
static int init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
|
||||
uintptr_t* buffer_ret);
|
||||
static void sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
|
||||
uintptr_t buffer);
|
||||
static bool load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
|
||||
int num_loaded_regions, uintptr_t buffer);
|
||||
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info,
|
||||
int num_loaded_regions);
|
||||
static void fill_failed_loaded_region();
|
||||
public:
|
||||
static void reset_archived_object_states(TRAPS);
|
||||
static void create_archived_object_cache() {
|
||||
@ -336,11 +417,6 @@ private:
|
||||
public:
|
||||
static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
static bool is_heap_object_archiving_allowed() {
|
||||
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);)
|
||||
NOT_CDS_JAVA_HEAP(return false;)
|
||||
}
|
||||
|
||||
static bool is_heap_region(int idx) {
|
||||
CDS_JAVA_HEAP_ONLY(return (idx >= MetaspaceShared::first_closed_heap_region &&
|
||||
idx <= MetaspaceShared::last_open_heap_region);)
|
||||
@ -363,11 +439,8 @@ private:
|
||||
CDS_JAVA_HEAP_ONLY(return _open_regions_mapped;)
|
||||
NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
}
|
||||
static bool is_mapped() {
|
||||
return closed_regions_mapped() && open_regions_mapped();
|
||||
}
|
||||
|
||||
static void fixup_mapped_regions() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static void fixup_regions() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
static bool is_archived_object_during_dumptime(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
|
||||
|
@@ -33,7 +33,21 @@
 
 inline oop HeapShared::decode_from_archive(narrowOop v) {
   assert(!CompressedOops::is_null(v), "narrow oop value can never be zero");
-  oop result = cast_to_oop((uintptr_t)_narrow_oop_base + ((uintptr_t)v << _narrow_oop_shift));
+  uintptr_t p = ((uintptr_t)_narrow_oop_base) + ((uintptr_t)v << _narrow_oop_shift);
+  if (p >= _dumptime_base_0) {
+    assert(p < _dumptime_top, "must be");
+    if (p >= _dumptime_base_3) {
+      p += _runtime_offset_3;
+    } else if (p >= _dumptime_base_2) {
+      p += _runtime_offset_2;
+    } else if (p >= _dumptime_base_1) {
+      p += _runtime_offset_1;
+    } else {
+      p += _runtime_offset_0;
+    }
+  }
+
+  oop result = cast_to_oop((uintptr_t)p);
   assert(is_object_aligned(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
   return result;
 }
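Working the path above through once with made-up numbers (shift 3, base 0, dump-time region 0 at 0x100000, relocated by +0x600000 at load time; self-contained, not JDK code):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t narrow_oop_base = 0;
  int       narrow_oop_shift = 3;
  uint32_t  v = 0x20002;                                               // archived narrow oop
  uintptr_t p = narrow_oop_base + ((uintptr_t)v << narrow_oop_shift);  // 0x100010

  uintptr_t dumptime_base_0 = 0x100000, dumptime_base_1 = 0x200000;
  intptr_t  runtime_offset_0 = 0x600000, runtime_offset_1 = 0x508000;
  if (p >= dumptime_base_0) {
    p += (p >= dumptime_base_1) ? runtime_offset_1 : runtime_offset_0;
  }
  assert(p == 0x700010);  // the object's runtime address
  return 0;
}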
@@ -367,6 +367,11 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
   soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(Symbol));
 
+  // Need to do this first, as subsequent steps may call virtual functions
+  // in archived Metadata objects.
+  CppVtables::serialize(soc);
+  soc->do_tag(--tag);
+
   // Dump/restore miscellaneous metadata.
   JavaClasses::serialize_offsets(soc);
   Universe::serialize(soc);
@@ -388,9 +393,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
   SystemDictionaryShared::serialize_vm_classes(soc);
   soc->do_tag(--tag);
 
-  CppVtables::serialize(soc);
-  soc->do_tag(--tag);
-
   CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc);)
 
   LambdaFormInvokers::serialize(soc);
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* klasses) {
|
||||
if(!HeapShared::is_heap_object_archiving_allowed()) {
|
||||
if(!HeapShared::can_write()) {
|
||||
log_info(cds)(
|
||||
"Archived java heap is not supported as UseG1GC, "
|
||||
"UseCompressedOops and UseCompressedClassPointers are required."
|
||||
@ -864,7 +866,7 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_heap_oopmaps() {
|
||||
if (HeapShared::is_heap_object_archiving_allowed()) {
|
||||
if (HeapShared::can_write()) {
|
||||
_closed_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2);
|
||||
dump_heap_oopmaps(_closed_heap_regions, _closed_heap_oopmaps);
|
||||
|
||||
@ -1136,7 +1138,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
|
||||
|
||||
// map_heap_regions() compares the current narrow oop and klass encodings
|
||||
// with the archived ones, so it must be done after all encodings are determined.
|
||||
static_mapinfo->map_heap_regions();
|
||||
static_mapinfo->map_or_load_heap_regions();
|
||||
}
|
||||
});
|
||||
log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
|
||||
@ -1420,7 +1422,7 @@ void MetaspaceShared::initialize_shared_spaces() {
|
||||
FileMapInfo *static_mapinfo = FileMapInfo::current_info();
|
||||
|
||||
// Verify various attributes of the archive, plus initialize the
|
||||
// shared string/symbol tables
|
||||
// shared string/symbol tables.
|
||||
char* buffer = static_mapinfo->serialized_data();
|
||||
intptr_t* array = (intptr_t*)buffer;
|
||||
ReadClosure rc(&array);
|
||||
@ -1429,7 +1431,10 @@ void MetaspaceShared::initialize_shared_spaces() {
|
||||
// Initialize the run-time symbol table.
|
||||
SymbolTable::create_table();
|
||||
|
||||
// Finish up archived heap initialization. These must be
|
||||
// done after ReadClosure.
|
||||
static_mapinfo->patch_heap_embedded_pointers();
|
||||
HeapShared::verify_loaded_heap();
|
||||
|
||||
// Close the mapinfo file
|
||||
static_mapinfo->close();
|
||||
@ -1512,8 +1517,15 @@ bool MetaspaceShared::use_full_module_graph() {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
bool result = _use_optimized_module_handling && _use_full_module_graph &&
|
||||
(UseSharedSpaces || DumpSharedSpaces) && HeapShared::is_heap_object_archiving_allowed();
|
||||
bool result = _use_optimized_module_handling && _use_full_module_graph;
|
||||
if (DumpSharedSpaces) {
|
||||
result &= HeapShared::can_write();
|
||||
} else if (UseSharedSpaces) {
|
||||
result &= HeapShared::can_use();
|
||||
} else {
|
||||
result = false;
|
||||
}
|
||||
|
||||
if (result && UseSharedSpaces) {
|
||||
// Classes used by the archived full module graph are loaded in JVMTI early phase.
|
||||
assert(!(JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()),
|
||||
|
@ -62,15 +62,19 @@ class MetaspaceShared : AllStatic {
|
||||
ro = 1, // read-only shared space
|
||||
bm = 2, // relocation bitmaps (freed after file mapping is finished)
|
||||
num_core_region = 2, // rw and ro
|
||||
num_non_heap_spaces = 3, // rw and ro and bm
|
||||
num_non_heap_regions = 3, // rw and ro and bm
|
||||
|
||||
// mapped java heap regions
|
||||
// java heap regions
|
||||
first_closed_heap_region = bm + 1,
|
||||
max_closed_heap_region = 2,
|
||||
last_closed_heap_region = first_closed_heap_region + max_closed_heap_region - 1,
|
||||
max_num_closed_heap_regions = 2,
|
||||
last_closed_heap_region = first_closed_heap_region + max_num_closed_heap_regions - 1,
|
||||
first_open_heap_region = last_closed_heap_region + 1,
|
||||
max_open_heap_region = 2,
|
||||
last_open_heap_region = first_open_heap_region + max_open_heap_region - 1,
|
||||
max_num_open_heap_regions = 2,
|
||||
last_open_heap_region = first_open_heap_region + max_num_open_heap_regions - 1,
|
||||
max_num_heap_regions = max_num_closed_heap_regions + max_num_open_heap_regions,
|
||||
|
||||
first_archive_heap_region = first_closed_heap_region,
|
||||
last_archive_heap_region = last_open_heap_region,
|
||||
|
||||
last_valid_region = last_open_heap_region,
|
||||
n_regions = last_valid_region + 1 // total number of regions
|
||||
|
@ -900,7 +900,7 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
|
||||
}
|
||||
|
||||
if (k->is_shared() && k->has_archived_mirror_index()) {
|
||||
if (HeapShared::open_regions_mapped()) {
|
||||
if (HeapShared::are_archived_mirrors_available()) {
|
||||
bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK);
|
||||
assert(present, "Missing archived mirror for %s", k->external_name());
|
||||
return;
|
||||
@ -1143,8 +1143,7 @@ static void set_klass_field_in_archived_mirror(oop mirror_obj, int offset, Klass
|
||||
}
|
||||
|
||||
void java_lang_Class::archive_basic_type_mirrors() {
|
||||
assert(HeapShared::is_heap_object_archiving_allowed(),
|
||||
"HeapShared::is_heap_object_archiving_allowed() must be true");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
for (int t = T_BOOLEAN; t < T_VOID+1; t++) {
|
||||
BasicType bt = (BasicType)t;
|
||||
@ -1182,8 +1181,7 @@ void java_lang_Class::archive_basic_type_mirrors() {
|
||||
// be used at runtime, new mirror object is created for the shared
|
||||
// class. The _has_archived_raw_mirror is cleared also during the process.
|
||||
oop java_lang_Class::archive_mirror(Klass* k) {
|
||||
assert(HeapShared::is_heap_object_archiving_allowed(),
|
||||
"HeapShared::is_heap_object_archiving_allowed() must be true");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
// Mirror is already archived
|
||||
if (k->has_archived_mirror_index()) {
|
||||
@ -1337,7 +1335,9 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
|
||||
|
||||
// mirror is archived, restore
|
||||
log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
|
||||
assert(Universe::heap()->is_archived_object(m), "must be archived mirror object");
|
||||
if (HeapShared::is_mapped()) {
|
||||
assert(Universe::heap()->is_archived_object(m), "must be archived mirror object");
|
||||
}
|
||||
assert(as_Klass(m) == k, "must be");
|
||||
Handle mirror(THREAD, m);
|
||||
|
||||
|
@ -72,11 +72,12 @@ inline oop read_string_from_compact_hashtable(address base_address, u4 offset) {
|
||||
return HeapShared::decode_from_archive(v);
|
||||
}
|
||||
|
||||
static CompactHashtable<
|
||||
typedef CompactHashtable<
|
||||
const jchar*, oop,
|
||||
read_string_from_compact_hashtable,
|
||||
java_lang_String::equals
|
||||
> _shared_table;
|
||||
java_lang_String::equals> SharedStringTable;
|
||||
|
||||
static SharedStringTable _shared_table;
|
||||
#endif
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
@ -761,7 +762,7 @@ public:
|
||||
};
|
||||
|
||||
void StringTable::write_to_archive(const DumpedInternedStrings* dumped_interned_strings) {
|
||||
assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
_shared_table.reset();
|
||||
CompactHashtableWriter writer(_items_count, ArchiveBuilder::string_stats());
|
||||
@ -779,9 +780,47 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
|
||||
if (soc->writing()) {
|
||||
// Sanity. Make sure we don't use the shared table at dump time
|
||||
_shared_table.reset();
|
||||
} else if (!HeapShared::closed_regions_mapped()) {
|
||||
} else if (!HeapShared::are_archived_strings_available()) {
|
||||
_shared_table.reset();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class SharedStringTransfer {
|
||||
JavaThread* _current;
|
||||
public:
|
||||
SharedStringTransfer(JavaThread* current) : _current(current) {}
|
||||
|
||||
void do_value(oop string) {
|
||||
JavaThread* THREAD = _current;
|
||||
ExceptionMark rm(THREAD);
|
||||
HandleMark hm(THREAD);
|
||||
StringTable::intern(string, THREAD);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
// The archived constant pools contains strings that must be in the interned string table.
|
||||
// If we fail here, it means the VM runs out of memory during bootstrap, so there's no point
|
||||
// of trying to recover from here.
|
||||
vm_exit_during_initialization("Failed to transfer shared strings to interned string table");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// If the CDS archive heap is loaded (not mapped) into the old generation,
|
||||
// it's possible for the shared strings to move due to full GC, making the
|
||||
// _shared_table invalid. Therefore, we proactively copy all the shared
|
||||
// strings into the _local_table, which can deal with oop relocation.
|
||||
void StringTable::transfer_shared_strings_to_local_table() {
|
||||
assert(HeapShared::is_loaded(), "must be");
|
||||
EXCEPTION_MARK;
|
||||
|
||||
// Reset _shared_table so that during the transfer, StringTable::intern()
|
||||
// will not look up from there. Instead, it will create a new entry in
|
||||
// _local_table for each element in shared_table_copy.
|
||||
SharedStringTable shared_table_copy = _shared_table;
|
||||
_shared_table.reset();
|
||||
|
||||
SharedStringTransfer transfer(THREAD);
|
||||
shared_table_copy.iterate(&transfer);
|
||||
}
|
||||
|
||||
#endif //INCLUDE_CDS_JAVA_HEAP
|
||||
|
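The transfer pattern above — snapshot the fixed table, clear it so intern() cannot consult it, then re-intern every entry into the GC-aware table — can be modeled with standard-library stand-ins (toy types, not JDK code):

#include <string>
#include <unordered_set>
#include <vector>

static std::vector<std::string> shared_table;        // fixed snapshot from the archive
static std::unordered_set<std::string> local_table;  // can track relocating entries

void transfer_shared_strings_to_local_table() {
  std::vector<std::string> copy = std::move(shared_table);
  shared_table.clear();               // intern() must not hit the stale snapshot
  for (const std::string& s : copy) {
    local_table.insert(s);            // corresponds to StringTable::intern()
  }
}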
@@ -112,6 +112,7 @@ class StringTable : public CHeapObj<mtSymbol>{
   static oop create_archived_string(oop s) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static void write_to_archive(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN;
   static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
+  static void transfer_shared_strings_to_local_table() NOT_CDS_JAVA_HEAP_RETURN;
 
   // Jcmd
   static void dump(outputStream* st, bool verbose=false);
@@ -1660,7 +1660,7 @@ void SystemDictionaryShared::update_archived_mirror_native_pointers_for(LambdaPr
 }
 
 void SystemDictionaryShared::update_archived_mirror_native_pointers() {
-  if (!HeapShared::open_regions_mapped()) {
+  if (!HeapShared::are_archived_mirrors_available()) {
     return;
   }
   if (MetaspaceShared::relocation_delta() == 0) {
@@ -133,13 +133,13 @@ void vmClasses::resolve_all(TRAPS) {
     // ConstantPool::restore_unshareable_info (restores the archived
     // resolved_references array object).
     //
-    // HeapShared::fixup_mapped_regions() fills the empty
+    // HeapShared::fixup_regions() fills the empty
     // spaces in the archived heap regions and may use
     // vmClasses::Object_klass(), so we can do this only after
     // Object_klass is resolved. See the above resolve_through()
     // call. No mirror objects are accessed/restored in the above call.
     // Mirrors are restored after java.lang.Class is loaded.
-    HeapShared::fixup_mapped_regions();
+    HeapShared::fixup_regions();
 
     // Initialize the constant pool for the Object_class
     assert(Object_klass()->is_shared(), "must be");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,7 @@ EpsilonHeap* EpsilonHeap::heap() {
   return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
 }
 
-HeapWord* EpsilonHeap::allocate_work(size_t size) {
+HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
   assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);
 
   HeapWord* res = NULL;
@@ -150,7 +150,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
   size_t used = _space->used();
 
   // Allocation successful, update counters
-  {
+  if (verbose) {
     size_t last = _last_counter_update;
     if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
       _monitoring_support->update_counters();
@@ -158,7 +158,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
   }
 
   // ...and print the occupancy line, if needed
-  {
+  if (verbose) {
     size_t last = _last_heap_print;
     if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
       print_heap_info(used);
@@ -263,6 +263,11 @@ HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exc
   return allocate_work(size);
 }
 
+HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
+  // Cannot use verbose=true because Metaspace is not initialized
+  return allocate_work(size, /* verbose = */false);
+}
+
 void EpsilonHeap::collect(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_metadata_GC_threshold:
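allocate_loaded_archive_space is just allocate_work with its side effects suppressed: archive loading happens so early that the counter-update and occupancy-printing paths would touch subsystems (the comment names Metaspace) that are not initialized yet. The shape, as a toy bump-pointer allocator (illustrative only):

#include <cstddef>

static char   heap_buf[1 << 20];  // toy backing store
static size_t heap_top = 0;

static void* allocate_work(size_t size, bool verbose = true) {
  if (heap_top + size > sizeof(heap_buf)) return NULL;
  void* res = heap_buf + heap_top;
  heap_top += size;
  if (verbose) {
    // update monitoring counters / print the occupancy line; skipped while
    // loading the archive because those paths are not safe this early
  }
  return res;
}

static void* allocate_loaded_archive_space(size_t size) {
  return allocate_work(size, /* verbose = */ false);
}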
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,7 +88,7 @@ public:
   }
 
   // Allocation
-  HeapWord* allocate_work(size_t size);
+  HeapWord* allocate_work(size_t size, bool verbose = true);
   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
   virtual HeapWord* allocate_new_tlab(size_t min_size,
                                       size_t requested_size,
@@ -131,6 +131,10 @@ public:
   MemRegion reserved_region() const { return _reserved; }
   bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
 
+  // Support for loading objects from CDS archive into the heap
+  virtual bool can_load_archived_objects() const { return true; }
+  virtual HeapWord* allocate_loaded_archive_space(size_t size);
+
   virtual void print_on(outputStream* st) const;
   virtual void print_tracing_info() const;
   virtual bool print_location(outputStream* st, void* addr) const;
@@ -481,6 +481,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Is the given object inside a CDS archive area?
   virtual bool is_archived_object(oop object) const;
 
+  // Support for loading objects from CDS archive into the heap
+  // (usually as a snapshot of the old generation).
+  virtual bool can_load_archived_objects() const { return false; }
+  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return NULL; }
+
   virtual bool is_oop(oop object) const;
   // Non product verification and debugging.
 #ifndef PRODUCT
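These two virtuals are the entire GC-side contract: return true from the first, and hand back a contiguous block that will not move while the archived objects are copied in and patched. A minimal stand-in with simplified types (not the real CollectedHeap):

#include <cstddef>

class ToyHeap {
public:
  virtual bool can_load_archived_objects() const { return false; }
  virtual void* allocate_loaded_archive_space(size_t) { return NULL; }
  virtual ~ToyHeap() {}
};

class ToyLoadingHeap : public ToyHeap {
  char _space[4096];  // toy backing store
public:
  virtual bool can_load_archived_objects() const { return true; }
  virtual void* allocate_loaded_archive_space(size_t bytes) {
    // Must stay put while archived objects are copied in and patched.
    return bytes <= sizeof(_space) ? _space : NULL;
  }
};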
@@ -242,7 +242,7 @@ void Universe::serialize(SerializeClosure* f) {
       _mirrors[i] = OopHandle(vm_global(), mirror_oop);
     }
   } else {
-    if (HeapShared::is_heap_object_archiving_allowed()) {
+    if (HeapShared::can_write()) {
       mirror_oop = _mirrors[i].resolve();
     } else {
       mirror_oop = NULL;
@@ -433,9 +433,9 @@ void Universe::genesis(TRAPS) {
 void Universe::initialize_basic_type_mirrors(TRAPS) {
 #if INCLUDE_CDS_JAVA_HEAP
   if (UseSharedSpaces &&
-      HeapShared::open_regions_mapped() &&
+      HeapShared::are_archived_mirrors_available() &&
       _mirrors[T_INT].resolve() != NULL) {
-    assert(HeapShared::is_heap_object_archiving_allowed(), "Sanity");
+    assert(HeapShared::can_use(), "Sanity");
 
     // check that all mirrors are mapped also
     for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
@@ -769,6 +769,9 @@ jint universe_init() {
     // currently mapped regions.
     MetaspaceShared::initialize_shared_spaces();
     StringTable::create_table();
+    if (HeapShared::is_loaded()) {
+      StringTable::transfer_shared_strings_to_local_table();
+    }
   } else
 #endif
   {
@@ -347,7 +347,7 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
   if (vmClasses::Object_klass_loaded()) {
     ClassLoaderData* loader_data = pool_holder()->class_loader_data();
 #if INCLUDE_CDS_JAVA_HEAP
-    if (HeapShared::open_regions_mapped() &&
+    if (HeapShared::is_fully_available() &&
         _cache->archived_references() != NULL) {
       oop archived = _cache->archived_references();
       // Create handle for the archived resolved reference array object
@@ -604,7 +604,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
   if (this->has_archived_mirror_index()) {
     ResourceMark rm(THREAD);
     log_debug(cds, mirror)("%s has raw archived mirror", external_name());
-    if (HeapShared::open_regions_mapped()) {
+    if (HeapShared::are_archived_mirrors_available()) {
       bool present = java_lang_Class::restore_archived_mirror(this, loader, module_handle,
                                                               protection_domain,
                                                               CHECK);
@@ -2002,7 +2002,7 @@ WB_ENTRY(jboolean, WB_IsJVMCISupportedByGC(JNIEnv* env))
 WB_END
 
 WB_ENTRY(jboolean, WB_IsJavaHeapArchiveSupported(JNIEnv* env))
-  return HeapShared::is_heap_object_archiving_allowed();
+  return HeapShared::can_use();
 WB_END
 
@@ -368,6 +368,7 @@ hotspot_appcds_dynamic = \
   -runtime/cds/appcds/StaticArchiveWithLambda.java \
   -runtime/cds/appcds/TestCombinedCompressedFlags.java \
   -runtime/cds/appcds/TestZGCWithCDS.java \
+  -runtime/cds/appcds/TestEpsilonGCWithCDS.java \
   -runtime/cds/appcds/UnusedCPDuringDump.java \
   -runtime/cds/appcds/VerifierTest_1B.java
 
@@ -398,10 +398,7 @@ public class TestCommon extends CDSTestUtils {
     public static OutputAnalyzer runWithArchive(AppCDSOptions opts)
         throws Exception {
 
-        ArrayList<String> cmd = new ArrayList<String>();
-
-        for (String p : opts.prefix) cmd.add(p);
-
+        ArrayList<String> cmd = opts.getRuntimePrefix();
         cmd.add("-Xshare:" + opts.xShareMode);
         cmd.add("-showversion");
         cmd.add("-XX:SharedArchiveFile=" + getCurrentArchiveName());
@ -0,0 +1,97 @@
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test Loading CDS archived heap objects into EpsilonGC
 * @bug 8234679
 * @requires vm.cds
 * @requires vm.gc.Epsilon
 * @requires vm.gc.G1
 *
 * @comment don't run this test if any -XX:+Use???GC options are specified, since they will
 *          interfere with the test.
 * @requires vm.gc == null
 *
 * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
 * @compile test-classes/Hello.java
 * @run driver TestEpsilonGCWithCDS
 */

import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;

public class TestEpsilonGCWithCDS {
    public final static String HELLO = "Hello World";
    static String helloJar;

    public static void main(String... args) throws Exception {
        helloJar = JarBuilder.build("hello", "Hello");

        // Check if we can use EpsilonGC during dump time, or run time, or both.
        test(false, true);
        test(true, false);
        test(true, true);

        // We usually have 2 heap regions. To increase test coverage, we can have 3 heap regions
        // by using "-Xmx256m -XX:ObjectAlignmentInBytes=64"
        test(false, true, true);
    }

    final static String G1 = "-XX:+UseG1GC";
    final static String Epsilon = "-XX:+UseEpsilonGC";
    final static String experiment = "-XX:+UnlockExperimentalVMOptions";

    static void test(boolean dumpWithEpsilon, boolean execWithEpsilon) throws Exception {
        test(dumpWithEpsilon, execWithEpsilon, false);
    }

    static void test(boolean dumpWithEpsilon, boolean execWithEpsilon, boolean useSmallRegions) throws Exception {
        String dumpGC = dumpWithEpsilon ? Epsilon : G1;
        String execGC = execWithEpsilon ? Epsilon : G1;
        String small1 = useSmallRegions ? "-Xmx256m" : "-showversion";
        String small2 = useSmallRegions ? "-XX:ObjectAlignmentInBytes=64" : "-showversion";
        OutputAnalyzer out;

        System.out.println("0. Dump with " + dumpGC);
        out = TestCommon.dump(helloJar,
                              new String[] {"Hello"},
                              experiment,
                              dumpGC,
                              small1,
                              small2,
                              "-Xlog:cds");
        out.shouldContain("Dumping shared data to file:");
        out.shouldHaveExitValue(0);

        System.out.println("1. Exec with " + execGC);
        out = TestCommon.exec(helloJar,
                              experiment,
                              execGC,
                              small1,
                              small2,
                              "-Xlog:cds",
                              "Hello");
        out.shouldContain(HELLO);
        out.shouldHaveExitValue(0);
    }
}

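For a quick local sanity check, the new test can also be driven on its own through jtreg (a usage sketch; $JDK stands in for the build under test):

    jtreg -testjdk:$JDK test/hotspot/jtreg/runtime/cds/appcds/TestEpsilonGCWithCDS.java
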
@ -39,11 +39,16 @@
 * @run driver HelloCustom
 */

import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import sun.hotspot.WhiteBox;

public class HelloCustom {
    static {
        // EpsilonGC does not support class unloading.
        CDSOptions.disableRuntimePrefixForEpsilonGC();
    }
    public static void main(String[] args) throws Exception {
        run();
    }

@ -39,10 +39,15 @@
 * @run driver UnloadUnregisteredLoaderTest
 */

import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.process.OutputAnalyzer;
import sun.hotspot.WhiteBox;

public class UnloadUnregisteredLoaderTest {
    static {
        // EpsilonGC does not support class unloading.
        CDSOptions.disableRuntimePrefixForEpsilonGC();
    }
    public static void main(String[] args) throws Exception {
        String appJar1 = JarBuilder.build("UnloadUnregisteredLoader_app1", "UnloadUnregisteredLoader");
        String appJar2 = JarBuilder.build(true, "UnloadUnregisteredLoader_app2",

@ -45,6 +45,10 @@ import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.helpers.ClassFileInstaller;

public class GCSharedStringsDuringDump {
    static {
        // EpsilonGC will run out of memory.
        CDSOptions.disableRuntimePrefixForEpsilonGC();
    }
    public static String appClasses[] = {
        GCSharedStringsDuringDumpWb.class.getName(),
    };

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -34,11 +34,16 @@ import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class SharedStringsStress {
    static {
        // EpsilonGC will run out of memory.
        CDSOptions.disableRuntimePrefixForEpsilonGC();
    }
    static String sharedArchiveConfigFile = CDSTestUtils.getOutputDir() + File.separator + "SharedStringsStress_gen.txt";

    public static void main(String[] args) throws Exception {

@ -121,6 +121,7 @@ public class VMProps implements Callable<Map<String, String>> {
        map.put("jdk.containerized", this::jdkContainerized);
        map.put("vm.flagless", this::isFlagless);
        vmGC(map); // vm.gc.X = true/false
        vmGCforCDS(map); // may set vm.gc
        vmOptFinalFlags(map);

        dump(map.map);
@ -291,6 +292,34 @@ public class VMProps implements Callable<Map<String, String>> {
        }
    }

    /**
     * "jtreg -vmoptions:-Dtest.cds.runtime.options=..." can be used to specify
     * the GC type to be used when running with a CDS archive. Set "vm.gc" accordingly,
     * so that tests that need to explicitly choose the GC type can be excluded
     * with "@requires vm.gc == null".
     *
     * @param map - property-value pairs
     */
    protected void vmGCforCDS(SafeMap map) {
        if (!GC.isSelectedErgonomically()) {
            // The GC has been explicitly specified on the command line, so
            // jtreg will set the "vm.gc" property. Let's not interfere with it.
            return;
        }

        String GC_PREFIX = "-XX:+Use";
        String GC_SUFFIX = "GC";
        String jtropts = System.getProperty("test.cds.runtime.options");
        if (jtropts != null) {
            for (String opt : jtropts.split(",")) {
                if (opt.startsWith(GC_PREFIX) && opt.endsWith(GC_SUFFIX)) {
                    String gc = opt.substring(GC_PREFIX.length(), opt.length() - GC_SUFFIX.length());
                    map.put("vm.gc", () -> gc);
                }
            }
        }
    }

    /**
     * Selected final flag.
     *

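To make the prefix/suffix parsing in vmGCforCDS() above concrete, here is a standalone worked example (the class name and hard-coded property value are illustrative only):

    public class GcNameExample {
        public static void main(String[] args) {
            // Same stripping logic as vmGCforCDS().
            String jtropts = "-XX:+UnlockExperimentalVMOptions,-XX:+UseEpsilonGC";
            String GC_PREFIX = "-XX:+Use";
            String GC_SUFFIX = "GC";
            for (String opt : jtropts.split(",")) {
                if (opt.startsWith(GC_PREFIX) && opt.endsWith(GC_SUFFIX)) {
                    // Prints "Epsilon"; jtreg then sees vm.gc == "Epsilon", so
                    // tests guarded by "@requires vm.gc == null" are excluded.
                    System.out.println(opt.substring(GC_PREFIX.length(),
                                                     opt.length() - GC_SUFFIX.length()));
                }
            }
        }
    }
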
@ -100,4 +100,44 @@ public class CDSOptions {
        return this;
    }

    // Called by CDSTestUtils.runWithArchive() and TestCommon.runWithArchive().
    //
    // Example:
    // - The dumping will be done with the default G1GC so we can generate
    //   the archived heap.
    // - The runtime execution will be done with EpsilonGC, to test its
    //   ability to load the archived heap.
    //
    // jtreg -vmoptions:-Dtest.cds.runtime.options=-XX:+UnlockExperimentalVMOptions,-XX:+UseEpsilonGC \
    //     test/hotspot/jtreg/runtime/cds
    public ArrayList<String> getRuntimePrefix() {
        ArrayList<String> cmdline = new ArrayList<>();

        String jtropts = System.getProperty("test.cds.runtime.options");
        if (jtropts != null) {
            for (String s : jtropts.split(",")) {
                if (!disabledRuntimePrefixes.contains(s)) {
                    cmdline.add(s);
                }
            }
        }

        for (String p : prefix) {
            cmdline.add(p);
        }

        return cmdline;
    }

    static ArrayList<String> disabledRuntimePrefixes = new ArrayList<>();

    // Do not use the command-line option <s>, even if it's specified in -Dtest.cds.runtime.options.
    private static void disableRuntimePrefix(String s) {
        disabledRuntimePrefixes.add(s);
    }

    // Do not use the command-line option "-XX:+UseEpsilonGC", even if it's specified in -Dtest.cds.runtime.options.
    public static void disableRuntimePrefixForEpsilonGC() {
        disableRuntimePrefix("-XX:+UseEpsilonGC");
    }
}

@ -404,10 +404,7 @@ public class CDSTestUtils {
    public static OutputAnalyzer runWithArchive(CDSOptions opts)
        throws Exception {

        ArrayList<String> cmd = new ArrayList<String>();

        for (String p : opts.prefix) cmd.add(p);

        ArrayList<String> cmd = opts.getRuntimePrefix();
        cmd.add("-Xshare:" + opts.xShareMode);
        cmd.add("-Dtest.timeout.factor=" + TestTimeoutFactor);
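Together with CDSOptions.getRuntimePrefix() above, a jtreg run with -Dtest.cds.runtime.options=-XX:+UnlockExperimentalVMOptions,-XX:+UseEpsilonGC now front-loads those options onto the test VM's command line, roughly (a sketch; the remaining options are elided):

    java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC <opts.prefix...> -Xshare:on -Dtest.timeout.factor=... ...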