8309065: Move the logic to determine archive heap location from CDS to G1 GC

Reviewed-by: iklam, tschatzl, stuefe
Ashutosh Mehra 2023-06-12 22:08:47 +00:00 committed by Ioi Lam
parent 80a8144af5
commit 3028295fdd
4 changed files with 61 additions and 103 deletions
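
For orientation, a minimal standalone sketch of the contract this change introduces (plain C++ with integer stand-ins and made-up addresses, not the HotSpot sources; assumes a 64-bit build): the GC reports wherever it actually placed the archive region, and CDS derives the relocation delta from that answer instead of computing a mapping address itself.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the GC side: the preferred address is only a hint,
// so the caller must work with whatever address comes back.
static uintptr_t gc_alloc_archive_region(size_t /*byte_size*/, uintptr_t preferred) {
  return preferred - 6 * 1024 * 1024;  // pretend the hint could not be honored
}

int main() {
  const uintptr_t requested_bottom = 0x7ffe00000ULL;   // address recorded in the archive
  const size_t    archive_bytes    = 6 * 1024 * 1024;

  // CDS side after the change: ask the GC for space, then measure the gap.
  const uintptr_t mapped_bottom = gc_alloc_archive_region(archive_bytes, requested_bottom);
  const ptrdiff_t delta = (ptrdiff_t)(mapped_bottom - requested_bottom);

  // A non-zero delta means the pointers embedded in the archived objects must
  // be patched by that amount before the objects can be used.
  printf("relocation delta = %td bytes, patching %s\n",
         delta, delta != 0 ? "required" : "not required");
  return 0;
}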

@@ -2085,26 +2085,7 @@ address FileMapInfo::heap_region_requested_address() {
}
}
// The address where this shared heap region is actually mapped at runtime. This function
// can be called only after we have determined the value for ArchiveHeapLoader::mapped_heap_delta().
address FileMapInfo::heap_region_mapped_address() {
assert(UseSharedSpaces, "runtime only");
assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode");
return heap_region_requested_address() + ArchiveHeapLoader::mapped_heap_delta();
}
bool FileMapInfo::map_heap_region() {
init_heap_region_relocation();
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();
if (bitmap_base == nullptr) {
log_info(cds)("CDS heap cannot be used because bitmap region cannot be mapped");
_heap_pointers_need_patching = false;
return false;
}
}
if (map_heap_region_impl()) {
#ifdef ASSERT
// The "old" regions must be parsable -- we cannot have any unused space
@@ -2130,66 +2111,28 @@ bool FileMapInfo::map_heap_region() {
}
}
void FileMapInfo::init_heap_region_relocation() {
assert(UseG1GC, "the following code assumes G1");
_heap_pointers_need_patching = false;
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
MemRegion archive_range = get_heap_region_requested_range();
address requested_bottom = (address)archive_range.start();
address heap_end = (address)heap_range.end();
assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be");
// We map the archive heap region at the very top of the heap to avoid fragmentation.
// To do that, we make sure that the bottom of the archived region is at the same
// address as the bottom of the highest possible G1 region.
address mapped_bottom = heap_end - align_up(archive_range.byte_size(), HeapRegion::GrainBytes);
if (UseCompressedOops &&
(narrow_oop_mode() != CompressedOops::mode() ||
narrow_oop_shift() != CompressedOops::shift())) {
log_info(cds)("CDS heap data needs to be relocated because the archive was created with an incompatible oop encoding mode.");
_heap_pointers_need_patching = true;
} else if (requested_bottom != mapped_bottom) {
log_info(cds)("CDS heap data needs to be relocated because it is mapped at a different address @ " INTPTR_FORMAT,
p2i(mapped_bottom));
_heap_pointers_need_patching = true;
}
ptrdiff_t delta = 0;
if (_heap_pointers_need_patching) {
delta = mapped_bottom - requested_bottom;
}
log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta);
ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift());
}
bool FileMapInfo::map_heap_region_impl() {
assert(UseG1GC, "the following code assumes G1");
FileMapRegion* r = region_at(MetaspaceShared::hp);
size_t size = r->used();
if (size > 0) {
HeapWord* start = (HeapWord*)heap_region_mapped_address();
_mapped_heap_memregion = MemRegion(start, size / HeapWordSize);
log_info(cds)("Trying to map heap data at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes",
p2i(start), size);
} else {
if (size == 0) {
return false; // no archived java heap data
}
// Check that the region is within the java heap
if (!G1CollectedHeap::heap()->check_archive_addresses(_mapped_heap_memregion)) {
log_info(cds)("Unable to allocate region, range is not within java heap.");
size_t word_size = size / HeapWordSize;
address requested_start = heap_region_requested_address();
log_info(cds)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
if (start == nullptr) {
log_info(cds)("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;
}
// allocate from java heap
if (!G1CollectedHeap::heap()->alloc_archive_regions(_mapped_heap_memregion)) {
log_info(cds)("Unable to allocate region, java heap range is already in use.");
return false;
}
_mapped_heap_memregion = MemRegion(start, word_size);
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_type()
// for mapped region as it is part of the reserved java heap, which is already recorded.
@@ -2208,10 +2151,37 @@ bool FileMapInfo::map_heap_region_impl() {
r->set_mapped_base(base);
if (VerifySharedSpaces && !r->check_region_crc()) {
dealloc_heap_region();
log_info(cds)("mapped heap region is corrupt");
log_info(cds)("UseSharedSpaces: mapped heap region is corrupt");
return false;
}
// If the requested range is different from the range allocated by GC, then
// the pointers need to be patched.
address mapped_start = (address) _mapped_heap_memregion.start();
ptrdiff_t delta = mapped_start - requested_start;
if (UseCompressedOops &&
(narrow_oop_mode() != CompressedOops::mode() ||
narrow_oop_shift() != CompressedOops::shift())) {
_heap_pointers_need_patching = true;
}
if (delta != 0) {
_heap_pointers_need_patching = true;
}
ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift());
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();
if (bitmap_base == nullptr) {
log_info(cds)("CDS heap cannot be used because bitmap region cannot be mapped");
dealloc_heap_region();
unmap_region(MetaspaceShared::hp);
_heap_pointers_need_patching = false;
return false;
}
}
log_info(cds)("Heap data mapped at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes",
p2i(mapped_start), _mapped_heap_memregion.byte_size());
log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta);
return true;
}
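
The patching decision above reduces to two independent triggers: the archive was dumped with an incompatible narrow-oop encoding, or the GC placed the region somewhere other than the requested address. A self-contained illustration (plain C++ with simplified names, not the HotSpot code; the real check also compares the narrow-oop mode, not just the shift):

#include <cstddef>
#include <cstdio>

// Simplified mirror of the two conditions in map_heap_region_impl().
static bool heap_pointers_need_patching(bool use_compressed_oops,
                                        int archived_oop_shift,
                                        int runtime_oop_shift,
                                        ptrdiff_t delta) {
  if (use_compressed_oops && archived_oop_shift != runtime_oop_shift) {
    return true;   // incompatible oop encoding, even when delta == 0
  }
  return delta != 0;  // the region moved, so absolute pointers are stale
}

int main() {
  printf("%d\n", heap_pointers_need_patching(true, 3, 3, 0));      // 0: nothing to patch
  printf("%d\n", heap_pointers_need_patching(true, 0, 3, 0));      // 1: encoding changed
  printf("%d\n", heap_pointers_need_patching(false, 0, 0, -4096)); // 1: region moved
  return 0;
}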
@@ -2265,7 +2235,6 @@ void FileMapInfo::unmap_regions(int regions[], int num_regions) {
// Unmap a memory region in the address space.
void FileMapInfo::unmap_region(int i) {
assert(!HeapShared::is_heap_region(i), "sanity");
FileMapRegion* r = region_at(i);
char* mapped_base = r->mapped_base();
size_t size = r->used_aligned();

@@ -561,7 +561,6 @@ public:
public:
address heap_region_dumptime_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_requested_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_mapped_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
narrowOop encoded_heap_region_dumptime_address();
private:

@@ -511,10 +511,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
return nullptr;
}
bool G1CollectedHeap::check_archive_addresses(MemRegion range) {
return _hrm.reserved().contains(range);
}
template <typename Func>
void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func) {
// Mark each G1 region touched by the range as old, add it to
@@ -532,37 +528,34 @@ void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func
}
}
bool G1CollectedHeap::alloc_archive_regions(MemRegion range) {
HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size, HeapWord* preferred_addr) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
MutexLocker x(Heap_lock);
MemRegion reserved = _hrm.reserved();
if (reserved.word_size() <= word_size) {
log_info(gc, heap)("Unable to allocate regions as archive heap is too large; size requested = " SIZE_FORMAT
" bytes, heap = " SIZE_FORMAT " bytes", word_size, reserved.word_size());
return nullptr;
}
// Temporarily disable pretouching of heap pages. This interface is used
// when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);
// For the specified MemRegion range, allocate the corresponding G1
// region(s) and mark them as old region(s).
HeapWord* start_address = range.start();
size_t word_size = range.word_size();
HeapWord* last_address = range.last();
size_t commits = 0;
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
// Perform the actual region allocation, exiting if it fails.
// Then note how much new space we have allocated.
// Attempt to allocate towards the end of the heap.
HeapWord* start_addr = reserved.end() - align_up(word_size, HeapRegion::GrainWords);
MemRegion range = MemRegion(start_addr, word_size);
HeapWord* last_address = range.last();
if (!_hrm.allocate_containing_regions(range, &commits, workers())) {
return false;
return nullptr;
}
increase_used(word_size * HeapWordSize);
if (commits != 0) {
log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * commits);
}
// Mark each G1 region touched by the range as old, add it to
@@ -579,7 +572,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion range) {
};
iterate_regions_in_range(range, set_region_to_old);
return true;
return start_addr;
}
void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion range) {

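The placement policy itself is a little address arithmetic: round the requested size up to whole G1 regions and put the block flush against the end of the reserved heap, so the rest of the heap stays unfragmented. A standalone sketch with made-up numbers (plain C++; the region size and the 64-bit reserved range here are illustrative assumptions):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  const size_t    region_bytes  = 4 * 1024 * 1024;        // e.g. 4M G1 regions
  const size_t    reserved_size = 2048ULL * 1024 * 1024;  // 2G reserved heap
  const uintptr_t reserved_end  = 0x800000000ULL;         // end of the reserved heap
  const size_t    archive_bytes = 6 * 1024 * 1024;        // archived heap objects

  // Refuse a request that cannot fit in the reserved heap at all.
  if (archive_bytes >= reserved_size) {
    printf("archive heap too large for this heap\n");
    return 1;
  }

  // Allocate towards the end of the heap, aligned to whole regions.
  const uintptr_t start = reserved_end - align_up(archive_bytes, region_bytes);
  printf("archive region(s) start at %#llx, spanning %zu region(s)\n",
         (unsigned long long)start, align_up(archive_bytes, region_bytes) / region_bytes);
  return 0;
}
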
@@ -700,21 +700,18 @@ public:
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list);
// Facility for allocating a fixed range within the heap and marking
// the containing regions as 'old'. For use at JVM init time, when the
// caller may mmap archived heap data at the specified range.
// Verify that the range is within the reserved heap.
bool check_archive_addresses(MemRegion range);
// Execute func(HeapRegion* r, bool is_last) on every region covered by the
// given range.
template <typename Func>
void iterate_regions_in_range(MemRegion range, const Func& func);
// Commit the appropriate G1 region(s) containing the specified range
// and mark them as 'old' region(s).
bool alloc_archive_regions(MemRegion range);
// Commit the required number of G1 region(s) according to the size requested
// and mark them as 'old' region(s). Preferred address is treated as a hint for
// the location of the archive space in the heap. The returned address may or may
// not be same as the preferred address.
// This API is only used for allocating heap space for the archived heap objects
// in the CDS archive.
HeapWord* alloc_archive_region(size_t word_size, HeapWord* preferred_addr);
// Populate the G1BlockOffsetTablePart for archived regions with the given
// memory range.