8311604: Simplify NOCOOPS requested addresses for archived heap objects

Reviewed-by: ccheung
This commit is contained in:
Ioi Lam 2023-07-09 15:18:46 +00:00
parent 4a1fcb6063
commit 581f90e242
30 changed files with 231 additions and 244 deletions

@ -1034,7 +1034,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
#if INCLUDE_CDS_JAVA_HEAP
static void log_heap_region(ArchiveHeapInfo* heap_info) {
MemRegion r = heap_info->memregion();
MemRegion r = heap_info->buffer_region();
address start = address(r.start());
address end = address(r.end());
log_region("heap", start, end, to_requested(start));
@ -1204,8 +1204,8 @@ void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
}
void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
char* start = info->start();
size_t size = info->byte_size();
char* start = info->buffer_start();
size_t size = info->buffer_byte_size();
char* top = start + size;
log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
size, size/double(total_size)*100.0, size, p2i(start));

@ -54,18 +54,20 @@ intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;
// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;
// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
void ArchiveHeapLoader::init_mapped_heap_relocation(ptrdiff_t delta, int dumptime_oop_shift) {
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
assert(!_mapped_heap_relocation_initialized, "only once");
if (!UseCompressedOops) {
assert(dumptime_oop_shift == 0, "sanity");
}
assert(can_map(), "sanity");
init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
_mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
_mapped_heap_delta = delta;
_mapped_heap_relocation_initialized = true;
}
@ -374,6 +376,9 @@ void ArchiveHeapLoader::finish_initialization() {
}
if (is_in_use()) {
patch_native_pointers();
intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;
intptr_t roots_oop = bottom + FileMapInfo::current_info()->heap_roots_offset();
HeapShared::init_roots(cast_to_oop(roots_oop));
}
}

@ -102,7 +102,7 @@ public:
static void fixup_region() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void init_mapped_heap_relocation(ptrdiff_t delta, int dumptime_oop_shift);
static void init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift);
private:
static bool _is_mapped;
static bool _is_loaded;
@ -124,6 +124,7 @@ private:
// is_mapped() only: the mapped address of each region is offset by this amount from
// their requested address.
static uintptr_t _mapped_heap_bottom;
static ptrdiff_t _mapped_heap_delta;
static bool _mapped_heap_relocation_initialized;

@ -47,11 +47,11 @@
#if INCLUDE_CDS_JAVA_HEAP
GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer;
GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;
size_t ArchiveHeapWriter::_heap_roots_bottom_offset;
size_t ArchiveHeapWriter::_heap_roots_offset;
size_t ArchiveHeapWriter::_heap_roots_word_size;
@ -153,7 +153,7 @@ address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr
}
oop ArchiveHeapWriter::heap_roots_requested_address() {
return cast_to_oop(_requested_bottom + _heap_roots_bottom_offset);
return cast_to_oop(_requested_bottom + _heap_roots_offset);
}
address ArchiveHeapWriter::requested_address() {
@ -213,7 +213,7 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShar
}
log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
_heap_roots_bottom_offset = _buffer_used;
_heap_roots_offset = _buffer_used;
_buffer_used = new_used;
}
@ -339,13 +339,25 @@ void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must archived at least one object!");
_requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes);
if (UseCompressedOops) {
_requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes);
} else {
// We always write the objects as if the heap started at this address. This
// makes the contents of the archive heap deterministic.
//
// Note that at runtime, the heap address is selected by the OS, so the archive
// heap will not be mapped at 0x10000000, and the contents need to be patched.
_requested_bottom = (address)NOCOOPS_REQUESTED_BASE;
}
assert(is_aligned(_requested_bottom, HeapRegion::GrainBytes), "sanity");
_requested_top = _requested_bottom + _buffer_used;
info->set_memregion(MemRegion(offset_to_buffered_address<HeapWord*>(0),
offset_to_buffered_address<HeapWord*>(_buffer_used)));
info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
offset_to_buffered_address<HeapWord*>(_buffer_used)));
info->set_heap_roots_offset(_heap_roots_offset);
}
// Oop relocation
@ -371,14 +383,11 @@ template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* b
store_oop_in_buffer(buffered_addr, request_oop);
}
void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
// Make heap content deterministic. See comments inside HeapShared::to_requested_address.
*buffered_addr = HeapShared::to_requested_address(requested_obj);
inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
*buffered_addr = requested_obj;
}
void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
// Note: HeapShared::to_requested_address() is not necessary because
// the heap always starts at a deterministic address with UseCompressedOops==true.
inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
narrowOop val = CompressedOops::encode_not_null(requested_obj);
*buffered_addr = val;
}
@ -481,7 +490,7 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
// Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
// doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom_offset);
oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj());
int length = roots != nullptr ? roots->length() : 0;
for (int i = 0; i < length; i++) {

@ -38,26 +38,81 @@
class MemRegion;
class ArchiveHeapInfo {
MemRegion _memregion;
MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
CHeapBitMap _oopmap;
CHeapBitMap _ptrmap;
size_t _heap_roots_offset; // Offset of the HeapShared::roots() object, from the bottom
// of the archived heap objects, in bytes.
public:
ArchiveHeapInfo() : _memregion(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
bool is_used() { return !_memregion.is_empty(); }
ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
bool is_used() { return !_buffer_region.is_empty(); }
MemRegion memregion() { return _memregion; }
void set_memregion(MemRegion r) { _memregion = r; }
MemRegion buffer_region() { return _buffer_region; }
void set_buffer_region(MemRegion r) { _buffer_region = r; }
char* start() { return (char*)_memregion.start(); }
size_t byte_size() { return _memregion.byte_size(); }
char* buffer_start() { return (char*)_buffer_region.start(); }
size_t buffer_byte_size() { return _buffer_region.byte_size(); }
CHeapBitMap* oopmap() { return &_oopmap; }
CHeapBitMap* ptrmap() { return &_ptrmap; }
void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
size_t heap_roots_offset() const { return _heap_roots_offset; }
};
#if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
// ArchiveHeapWriter manipulates three types of addresses:
//
// "source" vs "buffered" vs "requested"
//
// (Note: the design and convention is the same as for the archiving of Metaspace objects.
// See archiveBuilder.hpp.)
//
// - "source objects" are regular Java objects allocated during the execution
// of "java -Xshare:dump". They can be used as regular oops.
//
// HeapShared::archive_objects() recursively searches for the oops that need to be
// stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored into
// ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//
// The buffered objects are stored contiguously, possibly with interleaving fillers
// to make sure no objects span across boundaries of MIN_GC_REGION_ALIGNMENT.
//
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
// The requested address is implemented differently depending on UseCompressedOops:
//
// UseCompressedOops == true:
// The archived objects are stored assuming that the runtime COOPS compression
// scheme is exactly the same as in dump time (or else a more expensive runtime relocation
// would be needed.)
//
// At dump time, we assume that the runtime heap range is exactly the same as
// in dump time. The requested addresses of the archived objects are chosen such that
// they would occupy the top end of a G1 heap (TBD when dumping is supported by other
// collectors. See JDK-8298614).
//
// UseCompressedOops == false:
// At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
// need to perform relocation. Hence, the goal of the "requested address" is to ensure that
// the contents of the archived objects are deterministic. I.e., the oop fields of archived
// objects will always point to deterministic addresses.
//
// For G1, the archived heap is written such that the lowest archived object is placed
// at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
// ----------------------------------------------------------------------
public:
static const intptr_t NOCOOPS_REQUESTED_BASE = 0x10000000;
private:
class EmbeddedOopRelocator;
struct NativePointerInfo {
oop _src_obj;
@ -70,30 +125,13 @@ class ArchiveHeapWriter : AllStatic {
// (TODO: Perhaps change to 256K to be compatible with Shenandoah)
static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
// "source" vs "buffered" vs "requested"
//
// [1] HeapShared::archive_objects() identifies all of the oops that need to be stored
// into the CDS archive. These are entered into HeapShared::archived_object_cache().
// These are called "source objects"
//
// [2] ArchiveHeapWriter::write() copies all source objects into ArchiveHeapWriter::_buffer,
// which is a GrowableArray that sits outside of the valid heap range. Therefore
// we avoid using the addresses of these copies as oops. They are usually
// called "buffered_addr" in the code (of the type "address").
//
// [3] Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
// Note: the design and convention is the same as for the archiving of Metaspace objects.
// See archiveBuilder.hpp.
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
static size_t _buffer_used;
// The bottom of the copy of Heap::roots() inside this->_buffer.
static size_t _heap_roots_bottom_offset;
static size_t _heap_roots_offset;
static size_t _heap_roots_word_size;
// The address range of the requested location of the archived heap objects.
@ -160,8 +198,8 @@ class ArchiveHeapWriter : AllStatic {
static oop load_oop_from_buffer(oop* buffered_addr);
static oop load_oop_from_buffer(narrowOop* buffered_addr);
static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);
inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);
template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);
@ -182,7 +220,7 @@ public:
static address requested_address(); // requested address of the lowest archived heap object
static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
static address buffered_heap_roots_addr() {
return offset_to_buffered_address<address>(_heap_roots_bottom_offset);
return offset_to_buffered_address<address>(_heap_roots_offset);
}
static size_t heap_roots_word_size() {
return _heap_roots_word_size;

@ -277,21 +277,6 @@ void WriteClosure::do_ptr(void** p) {
_dump_region->append_intptr_t((intptr_t)ptr, true);
}
void WriteClosure::do_oop(oop* o) {
if (*o == nullptr) {
_dump_region->append_intptr_t(0);
} else {
assert(HeapShared::can_write(), "sanity");
intptr_t p;
if (UseCompressedOops) {
p = (intptr_t)CompressedOops::encode_not_null(*o);
} else {
p = cast_from_oop<intptr_t>(HeapShared::to_requested_address(*o));
}
_dump_region->append_intptr_t(p);
}
}
void WriteClosure::do_region(u_char* start, size_t size) {
assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
assert(size % sizeof(intptr_t) == 0, "bad size");
@ -334,28 +319,6 @@ void ReadClosure::do_tag(int tag) {
FileMapInfo::assert_mark(tag == old_tag);
}
void ReadClosure::do_oop(oop *p) {
if (UseCompressedOops) {
narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
if (CompressedOops::is_null(o) || !ArchiveHeapLoader::is_in_use()) {
*p = nullptr;
} else {
assert(ArchiveHeapLoader::can_use(), "sanity");
assert(ArchiveHeapLoader::is_in_use(), "must be");
*p = ArchiveHeapLoader::decode_from_archive(o);
}
} else {
intptr_t dumptime_oop = nextPtr();
if (dumptime_oop == 0 || !ArchiveHeapLoader::is_in_use()) {
*p = nullptr;
} else {
assert(!ArchiveHeapLoader::is_loaded(), "ArchiveHeapLoader::can_load() is not supported for uncompessed oops");
intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
*p = cast_to_oop(runtime_oop);
}
}
}
void ReadClosure::do_region(u_char* start, size_t size) {
assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
assert(size % sizeof(intptr_t) == 0, "bad size");

@ -25,8 +25,8 @@
#ifndef SHARE_CDS_ARCHIVEUTILS_HPP
#define SHARE_CDS_ARCHIVEUTILS_HPP
#include "cds/serializeClosure.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
@ -202,7 +202,6 @@ public:
_dump_region->append_intptr_t((intptr_t)tag);
}
void do_oop(oop* o);
void do_region(u_char* start, size_t size);
bool reading() const { return false; }
};
@ -226,7 +225,6 @@ public:
void do_int(int* p);
void do_bool(bool *p);
void do_tag(int tag);
void do_oop(oop *p);
void do_region(u_char* start, size_t size);
bool reading() const { return true; }
};

@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_CDSHEAPVERIFIER_HPP
#define SHARED_CDS_CDSHEAPVERIFIER_HPP
#ifndef SHARE_CDS_CDSHEAPVERIFIER_HPP
#define SHARE_CDS_CDSHEAPVERIFIER_HPP
#include "cds/heapShared.hpp"
#include "memory/iterator.hpp"
@ -88,4 +88,4 @@ public:
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARED_CDS_CDSHEAPVERIFIER_HPP
#endif // SHARE_CDS_CDSHEAPVERIFIER_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_CDSPROTECTIONDOMAIN_HPP
#define SHARED_CDS_CDSPROTECTIONDOMAIN_HPP
#ifndef SHARE_CDS_CDSPROTECTIONDOMAIN_HPP
#define SHARE_CDS_CDSPROTECTIONDOMAIN_HPP
#include "classfile/moduleEntry.hpp"
#include "oops/oopHandle.inline.hpp"
@ -113,4 +113,4 @@ public:
}
};
#endif // SHARED_CDS_CDSPROTECTIONDOMAIN_HPP
#endif // SHARE_CDS_CDSPROTECTIONDOMAIN_HPP

@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_CDS_GLOBALS_HPP
#define SHARED_CDS_CDS_GLOBALS_HPP
#ifndef SHARE_CDS_CDS_GLOBALS_HPP
#define SHARE_CDS_CDS_GLOBALS_HPP
#include "runtime/globals_shared.hpp"
@ -57,7 +57,7 @@
"Address to allocate shared memory region for class data") \
range(0, SIZE_MAX) \
\
product(ccstr, SharedArchiveConfigFile, nullptr, \
product(ccstr, SharedArchiveConfigFile, nullptr, \
"Data to add to the CDS archive file") \
\
product(uint, SharedSymbolTableBucketSize, 4, \
@ -67,25 +67,25 @@
product(bool, AllowArchivingWithJavaAgent, false, DIAGNOSTIC, \
"Allow Java agent to be run with CDS dumping") \
\
develop(ccstr, ArchiveHeapTestClass, nullptr, \
develop(ccstr, ArchiveHeapTestClass, nullptr, \
"For JVM internal testing only. The static field named " \
"\"archivedObjects\" of the specified class is stored in the " \
"CDS archive heap") \
\
product(ccstr, DumpLoadedClassList, nullptr, \
product(ccstr, DumpLoadedClassList, nullptr, \
"Dump the names all loaded classes, that could be stored into " \
"the CDS archive, in the specified file") \
\
product(ccstr, SharedClassListFile, nullptr, \
product(ccstr, SharedClassListFile, nullptr, \
"Override the default CDS class list") \
\
product(ccstr, SharedArchiveFile, nullptr, \
product(ccstr, SharedArchiveFile, nullptr, \
"Override the default location of the CDS archive file") \
\
product(ccstr, ArchiveClassesAtExit, nullptr, \
product(ccstr, ArchiveClassesAtExit, nullptr, \
"The path and name of the dynamic archive file") \
\
product(ccstr, ExtraSharedClassListFile, nullptr, \
product(ccstr, ExtraSharedClassListFile, nullptr, \
"Extra classlist for building the CDS archive file") \
\
product(int, ArchiveRelocationMode, 0, DIAGNOSTIC, \
@ -99,4 +99,4 @@
DECLARE_FLAGS(CDS_FLAGS)
#endif // SHARED_CDS_CDS_GLOBALS_HPP
#endif // SHARE_CDS_CDS_GLOBALS_HPP

@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_DUMPTIMESHAREDCLASSINFO_HPP
#define SHARED_CDS_DUMPTIMESHAREDCLASSINFO_HPP
#ifndef SHARE_CDS_DUMPTIMECLASSINFO_HPP
#define SHARE_CDS_DUMPTIMECLASSINFO_HPP
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/metaspaceShared.hpp"
@ -271,4 +271,4 @@ private:
template<typename Function> void iterate_all(Function function) const;
};
#endif // SHARED_CDS_DUMPTIMESHAREDCLASSINFO_HPP
#endif // SHARE_CDS_DUMPTIMECLASSINFO_HPP

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,8 +23,8 @@
*
*/
#ifndef SHARED_CDS_DUMPTIMESHAREDCLASSINFO_INLINE_HPP
#define SHARED_CDS_DUMPTIMESHAREDCLASSINFO_INLINE_HPP
#ifndef SHARE_CDS_DUMPTIMECLASSINFO_INLINE_HPP
#define SHARE_CDS_DUMPTIMECLASSINFO_INLINE_HPP
#include "cds/dumpTimeClassInfo.hpp"
@ -68,4 +68,4 @@ void DumpTimeSharedClassTable::iterate_all_live_classes(ITER* iter) const {
#endif // INCLUDE_CDS
#endif // SHARED_CDS_DUMPTIMESHAREDCLASSINFO_INLINE_HPP
#endif // SHARE_CDS_DUMPTIMECLASSINFO_INLINE_HPP

@ -207,17 +207,6 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_narrow_oop_mode = CompressedOops::mode();
_narrow_oop_base = CompressedOops::base();
_narrow_oop_shift = CompressedOops::shift();
if (UseCompressedOops) {
_heap_begin = CompressedOops::begin();
_heap_end = CompressedOops::end();
} else {
#if INCLUDE_G1GC
address start = (address)G1CollectedHeap::heap()->reserved().start();
address end = (address)G1CollectedHeap::heap()->reserved().end();
_heap_begin = HeapShared::to_requested_address(start);
_heap_end = HeapShared::to_requested_address(end);
#endif
}
}
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
@ -286,8 +275,6 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- cloned_vtables_offset: " SIZE_FORMAT_X, _cloned_vtables_offset);
st->print_cr("- serialized_data_offset: " SIZE_FORMAT_X, _serialized_data_offset);
st->print_cr("- heap_begin: " INTPTR_FORMAT, p2i(_heap_begin));
st->print_cr("- heap_end: " INTPTR_FORMAT, p2i(_heap_end));
st->print_cr("- jvm_ident: %s", _jvm_ident);
st->print_cr("- shared_path_table_offset: " SIZE_FORMAT_X, _shared_path_table_offset);
st->print_cr("- app_class_paths_start_index: %d", _app_class_paths_start_index);
@ -300,6 +287,7 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- has_non_jar_in_classpath: %d", _has_non_jar_in_classpath);
st->print_cr("- requested_base_address: " INTPTR_FORMAT, p2i(_requested_base_address));
st->print_cr("- mapped_base_address: " INTPTR_FORMAT, p2i(_mapped_base_address));
st->print_cr("- heap_roots_offset: " SIZE_FORMAT, _heap_roots_offset);
st->print_cr("- allow_archiving_with_java_agent:%d", _allow_archiving_with_java_agent);
st->print_cr("- use_optimized_module_handling: %d", _use_optimized_module_handling);
st->print_cr("- use_full_module_graph %d", _use_full_module_graph);
@ -1567,9 +1555,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
} else {
#if INCLUDE_G1GC
mapping_offset = requested_base - (char*)G1CollectedHeap::heap()->reserved().start();
#endif
mapping_offset = 0; // not used with !UseCompressedOops
}
#endif // INCLUDE_CDS_JAVA_HEAP
} else {
@ -1634,10 +1620,11 @@ char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInf
}
size_t FileMapInfo::write_heap_region(ArchiveHeapInfo* heap_info) {
char* start = heap_info->start();
size_t size = heap_info->byte_size();
write_region(MetaspaceShared::hp, start, size, false, false);
return size;
char* buffer_start = heap_info->buffer_start();
size_t buffer_size = heap_info->buffer_byte_size();
write_region(MetaspaceShared::hp, buffer_start, buffer_size, false, false);
header()->set_heap_roots_offset(heap_info->heap_roots_offset());
return buffer_size;
}
// Dump bytes to file -- at the current file position.
@ -2027,9 +2014,6 @@ bool FileMapInfo::can_use_heap_region() {
archive_narrow_klass_shift);
log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
log_info(cds)(" heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
p2i(header()->heap_begin()), p2i(header()->heap_end()));
log_info(cds)("The current max heap size = " SIZE_FORMAT "M, HeapRegion::GrainBytes = " SIZE_FORMAT,
MaxHeapSize/M, HeapRegion::GrainBytes);
log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
@ -2084,9 +2068,10 @@ address FileMapInfo::heap_region_requested_address() {
// which is the runtime location of the referenced object.
return /*runtime*/ CompressedOops::base() + r->mapping_offset();
} else {
// We can avoid relocation if each region is mapped into the exact same address
// where it was at dump time.
return /*dumptime*/header()->heap_begin() + r->mapping_offset();
// This was the hard-coded requested base address used at dump time. With uncompressed oops,
// the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
// what base address was picked at dump time.
return (address)ArchiveHeapWriter::NOCOOPS_REQUESTED_BASE;
}
}
@ -2172,7 +2157,7 @@ bool FileMapInfo::map_heap_region_impl() {
if (delta != 0) {
_heap_pointers_need_patching = true;
}
ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift());
ArchiveHeapLoader::init_mapped_heap_info(mapped_start, delta, narrow_oop_shift());
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();

@ -198,8 +198,6 @@ private:
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
size_t _cloned_vtables_offset; // The address of the first cloned vtable
size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
address _heap_begin; // heap begin at dump time.
address _heap_end; // heap end at dump time.
bool _has_non_jar_in_classpath; // non-jar file entry exists in classpath
unsigned int _common_app_classpath_prefix_size; // size of the common prefix of app class paths
// 0 if no common prefix exists
@ -232,6 +230,8 @@ private:
// some expensive operations.
bool _use_full_module_graph; // Can we use the full archived module graph?
size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap
size_t _heap_roots_offset; // Offset of the HeapShared::roots() object, from the bottom
// of the archived heap objects, in bytes.
char* from_mapped_offset(size_t offset) const {
return mapped_base_address() + offset;
}
@ -263,8 +263,6 @@ public:
CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; }
char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); }
char* serialized_data() const { return from_mapped_offset(_serialized_data_offset); }
address heap_begin() const { return _heap_begin; }
address heap_end() const { return _heap_end; }
const char* jvm_ident() const { return _jvm_ident; }
char* requested_base_address() const { return _requested_base_address; }
char* mapped_base_address() const { return _mapped_base_address; }
@ -273,6 +271,7 @@ public:
size_t ptrmap_size_in_bits() const { return _ptrmap_size_in_bits; }
bool compressed_oops() const { return _compressed_oops; }
bool compressed_class_pointers() const { return _compressed_class_ptrs; }
size_t heap_roots_offset() const { return _heap_roots_offset; }
// FIXME: These should really return int
jshort max_used_path_index() const { return _max_used_path_index; }
jshort app_module_paths_start_index() const { return _app_module_paths_start_index; }
@ -284,6 +283,7 @@ public:
void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
void copy_base_archive_name(const char* name);
void set_shared_path_table(SharedPathTable table) {
@ -379,6 +379,7 @@ public:
address narrow_oop_base() const { return header()->narrow_oop_base(); }
int narrow_oop_shift() const { return header()->narrow_oop_shift(); }
uintx max_heap_size() const { return header()->max_heap_size(); }
size_t heap_roots_offset() const { return header()->heap_roots_offset(); }
size_t core_region_alignment() const { return header()->core_region_alignment(); }
CompressedOops::Mode narrow_oop_mode() const { return header()->narrow_oop_mode(); }

@ -832,23 +832,10 @@ void HeapShared::write_subgraph_info_table() {
}
}
void HeapShared::serialize_root(SerializeClosure* soc) {
oop roots_oop = nullptr;
if (soc->reading()) {
soc->do_oop(&roots_oop); // read from archive
assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
// Create an OopHandle only if we have actually mapped or loaded the roots
if (roots_oop != nullptr) {
assert(ArchiveHeapLoader::is_in_use(), "must be");
_roots = OopHandle(Universe::vm_global(), roots_oop);
}
} else {
// writing
if (HeapShared::can_write()) {
roots_oop = ArchiveHeapWriter::heap_roots_requested_address();
}
soc->do_oop(&roots_oop); // write to archive
void HeapShared::init_roots(oop roots_oop) {
if (roots_oop != nullptr) {
assert(ArchiveHeapLoader::is_in_use(), "must be");
_roots = OopHandle(Universe::vm_global(), roots_oop);
}
}
@ -1669,8 +1656,6 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
_num_total_oops ++;
narrowOop v = *p;
if (!CompressedOops::is_null(v)) {
// Note: HeapShared::to_requested_address() is not necessary because
// the heap always starts at a deterministic address with UseCompressedOops==true.
size_t idx = p - (narrowOop*)_start;
_oopmap->set_bit(idx);
} else {
@ -1692,33 +1677,6 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
};
#endif
address HeapShared::to_requested_address(address dumptime_addr) {
assert(DumpSharedSpaces, "static dump time only");
if (dumptime_addr == nullptr || UseCompressedOops) {
return dumptime_addr;
}
// With UseCompressedOops==false, actual_base is selected by the OS so
// it's different across -Xshare:dump runs.
address actual_base = (address)G1CollectedHeap::heap()->reserved().start();
address actual_end = (address)G1CollectedHeap::heap()->reserved().end();
assert(actual_base <= dumptime_addr && dumptime_addr <= actual_end, "must be an address in the heap");
// We always write the objects as if the heap started at this address. This
// makes the heap content deterministic.
//
// Note that at runtime, the heap address is also selected by the OS, so
// the archive heap will not be mapped at 0x10000000. Instead, we will call
// HeapShared::patch_embedded_pointers() to relocate the heap contents
// accordingly.
const address REQUESTED_BASE = (address)0x10000000;
intx delta = REQUESTED_BASE - actual_base;
address requested_addr = dumptime_addr + delta;
assert(REQUESTED_BASE != 0 && requested_addr != nullptr, "sanity");
return requested_addr;
}
#ifndef PRODUCT
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

@ -313,7 +313,6 @@ private:
static bool archive_object(oop obj);
static void copy_interned_strings();
static void copy_roots();
static void resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]);
static void resolve_classes_for_subgraph_of(JavaThread* current, Klass* k);
@ -405,16 +404,10 @@ private:
static void init_for_dumping(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void write_subgraph_info_table() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_root(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
static void init_roots(oop roots_oop) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_tables(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
static bool initialize_enum_klass(InstanceKlass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(false);
// Returns the address of a heap object when it's mapped at the
// runtime requested address. See comments in archiveBuilder.hpp.
static address to_requested_address(address dumptime_addr) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static oop to_requested_address(oop dumptime_oop) {
return cast_to_oop(to_requested_address(cast_from_oop<address>(dumptime_oop)));
}
static bool is_a_test_class_in_unnamed_module(Klass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);
};

@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_LAMBDAPROXYCLASSINFO_HPP
#define SHARED_CDS_LAMBDAPROXYCLASSINFO_HPP
#ifndef SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#define SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP
#include "cds/metaspaceShared.hpp"
#include "classfile/javaClasses.hpp"
#include "utilities/growableArray.hpp"
@ -177,4 +177,4 @@ class LambdaProxyClassDictionary : public OffsetCompactHashtable<
const RunTimeLambdaProxyClassInfo*,
RunTimeLambdaProxyClassInfo::EQUALS> {};
#endif // SHARED_CDS_LAMBDAPROXYCLASSINFO_HPP
#endif // SHARE_CDS_LAMBDAPROXYCLASSINFO_HPP

@ -366,7 +366,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
// Dump/restore miscellaneous metadata.
JavaClasses::serialize_offsets(soc);
HeapShared::serialize_root(soc);
Universe::serialize(soc);
soc->do_tag(--tag);

@ -33,6 +33,7 @@
class FileMapInfo;
class outputStream;
class SerializeClosure;
template<class E> class GrowableArray;

@ -22,8 +22,8 @@
*
*/
#ifndef SHARED_CDS_SHAREDCLASSINFO_HPP
#define SHARED_CDS_SHAREDCLASSINFO_HPP
#ifndef SHARE_CDS_RUNTIMECLASSINFO_HPP
#define SHARE_CDS_RUNTIMECLASSINFO_HPP
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
@ -256,4 +256,4 @@ class RunTimeSharedDictionary : public OffsetCompactHashtable<
Symbol*,
const RunTimeClassInfo*,
RunTimeClassInfo::EQUALS> {};
#endif // SHARED_CDS_SHAREDCLASSINFO_HPP
#endif // SHARE_CDS_RUNTIMECLASSINFO_HPP

@ -0,0 +1,68 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_SERIALIZECLOSURE_HPP
#define SHARE_CDS_SERIALIZECLOSURE_HPP
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
// A handy way to read/write auxiliary information in the CDS archive file
// (without the burden of adding new fields into FileMapHeader).
class SerializeClosure : public StackObj {
public:
  // Return true if this closure is reading from the archive,
  // false if it is writing to the archive.
  virtual bool reading() const = 0;
  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;
  // Read/write the 32-bit unsigned integer pointed to by p.
  virtual void do_u4(u4* p) = 0;
  // Read/write the int pointed to by p.
  virtual void do_int(int* p) = 0;
  // Read/write the bool pointed to by p.
  virtual void do_bool(bool* p) = 0;
  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;
  // Check/write the tag. If reading, then compare the tag against
  // the passed in value and fail if they don't match. This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
  // Convenience: true when the closure writes to the archive.
  bool writing() {
    return !reading();
  }
  // Useful alias: forwards any pointer type to do_ptr(void**).
  template <typename T> void do_ptr(T** p) { do_ptr((void**)p); }
};
#endif // SHARE_CDS_SERIALIZECLOSURE_HPP

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "cds/metaspaceShared.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/moduleEntry.hpp"

@ -36,6 +36,7 @@
class JvmtiThreadState;
class RecordComponent;
class SerializeClosure;
#define CHECK_INIT(offset) assert(offset != 0, "should be initialized"); return offset;

@ -25,6 +25,7 @@
#ifndef SHARE_CLASSFILE_JAVACLASSESIMPL_HPP
#define SHARE_CLASSFILE_JAVACLASSESIMPL_HPP
#include "cds/serializeClosure.hpp"
#include "classfile/javaClasses.hpp"
#include "runtime/continuationJavaClasses.hpp"
#include "utilities/macros.hpp"

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilerDirectives.hpp"

@ -32,6 +32,8 @@
#include "utilities/macros.hpp"
#include "utilities/enumIterator.hpp"
class SerializeClosure;
// The class vmSymbols is a name space for fast lookup of
// symbols commonly used in the VM.
//

@ -328,45 +328,6 @@ public:
virtual bool should_return_fine_grain() { return false; }
};
// Abstract closure for serializing data (read or write).
class SerializeClosure : public Closure {
public:
// Return bool indicating whether closure implements read or write.
virtual bool reading() const = 0;
// Read/write the void pointer pointed to by p.
virtual void do_ptr(void** p) = 0;
// Read/write the 32-bit unsigned integer pointed to by p.
virtual void do_u4(u4* p) = 0;
// Read/write the int pointed to by p.
virtual void do_int(int* p) = 0;
// Read/write the bool pointed to by p.
virtual void do_bool(bool* p) = 0;
// Read/write the region specified.
virtual void do_region(u_char* start, size_t size) = 0;
// Check/write the tag. If reading, then compare the tag against
// the passed in value and fail is they don't match. This allows
// for verification that sections of the serialized data are of the
// correct length.
virtual void do_tag(int tag) = 0;
// Read/write the oop
virtual void do_oop(oop* o) = 0;
bool writing() {
return !reading();
}
// Useful alias
template <typename T> void do_ptr(T** p) { do_ptr((void**)p); }
};
class SymbolClosure : public StackObj {
public:
virtual void do_symbol(Symbol**) = 0;

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "memory/iterator.inline.hpp"

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/gc_globals.hpp"

@ -54,10 +54,9 @@ public class DeterministicDump {
baseArgs.add("-Xmx128M");
if (Platform.is64bit()) {
// These options are available only on 64-bit.
// This option is available only on 64-bit.
String sign = (compressed) ? "+" : "-";
baseArgs.add("-XX:" + sign + "UseCompressedOops");
baseArgs.add("-XX:" + sign + "UseCompressedClassPointers");
}
String baseArchive = dump(baseArgs);