8296344: Remove dependency on G1 for writing the CDS archive heap
Reviewed-by: ayang, tschatzl, ccheung
parent f1d76fa925, commit bdcbafb219

Changed files:
  src/hotspot/share/cds: archiveBuilder.cpp, archiveBuilder.hpp, archiveHeapWriter.cpp (new), archiveHeapWriter.hpp (new), cdsHeapVerifier.cpp, filemap.cpp, heapShared.cpp, heapShared.hpp, metaspaceShared.cpp
  src/hotspot/share/classfile
  src/hotspot/share/gc/g1: g1Allocator.cpp, g1Allocator.hpp, g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1HeapVerifier.cpp, g1HeapVerifier.hpp
  src/hotspot/share/gc/shared
  src/hotspot/share/oops
  test/hotspot/jtreg: TEST.groups, runtime/cds/appcds
src/hotspot/share/cds/archiveBuilder.cpp

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
@@ -830,14 +831,11 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
  return buffer_to_offset(p);
}

// Update a Java object to point its Klass* to the address where
// the class would be mapped at runtime.
void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
  assert(DumpSharedSpaces, "sanity");
  Klass* k = get_buffered_klass(o->klass());
  k = get_buffered_klass(k);
  Klass* requested_k = to_requested(k);
  narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
  o->set_narrow_klass(nk);
  return CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
}

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
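For context, this is how the new return value is consumed later in this commit (excerpted from ArchiveHeapWriter::update_header_for_requested_obj() below; buffered_addr points into the writer's off-heap buffer, so the source heap object itself is no longer modified):

  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  oop fake_oop = cast_to_oop(buffered_addr);
  fake_oop->set_narrow_klass(nk);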
@@ -1062,19 +1060,18 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {

  while (start < end) {
    size_t byte_size;
    oop archived_oop = cast_to_oop(start);
    oop original_oop = HeapShared::get_original_object(archived_oop);
    oop original_oop = ArchiveHeapWriter::buffered_addr_to_source_obj(start);
    if (original_oop != nullptr) {
      ResourceMark rm;
      log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
                         p2i(to_requested(start)), original_oop->klass()->external_name());
      byte_size = original_oop->size() * BytesPerWord;
    } else if (archived_oop == HeapShared::roots()) {
    } else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) {
      // HeapShared::roots() is copied specially so it doesn't exist in
      // HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
      log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
                         p2i(to_requested(start)));
      byte_size = objArrayOopDesc::object_size(HeapShared::roots()->length()) * BytesPerWord;
      byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord;
    } else {
      // We have reached the end of the region
      break;

@@ -1091,7 +1088,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
    }
  }
  static address to_requested(address p) {
    return HeapShared::to_requested_address(p);
    return ArchiveHeapWriter::buffered_addr_to_requested_addr(p);
  }
#endif

src/hotspot/share/cds/archiveBuilder.hpp

@@ -443,7 +443,7 @@ public:
    return alloc_stats()->string_stats();
  }

  void relocate_klass_ptr_of_oop(oop o);
  narrowKlass get_requested_narrow_klass(Klass* k);

  static Klass* get_buffered_klass(Klass* src_klass) {
    Klass* klass = (Klass*)current()->get_buffered_addr((address)src_klass);
src/hotspot/share/cds/archiveHeapWriter.cpp (new file, 657 lines)

@@ -0,0 +1,657 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_top;
size_t ArchiveHeapWriter::_open_bottom;
size_t ArchiveHeapWriter::_open_top;
size_t ArchiveHeapWriter::_closed_bottom;
size_t ArchiveHeapWriter::_closed_top;
size_t ArchiveHeapWriter::_heap_roots_bottom;

size_t ArchiveHeapWriter::_heap_roots_word_size;

address ArchiveHeapWriter::_requested_open_region_bottom;
address ArchiveHeapWriter::_requested_open_region_top;
address ArchiveHeapWriter::_requested_closed_region_bottom;
address ArchiveHeapWriter::_requested_closed_region_top;

ResourceBitMap* ArchiveHeapWriter::_closed_oopmap;
ResourceBitMap* ArchiveHeapWriter::_open_oopmap;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

void ArchiveHeapWriter::init() {
  if (HeapShared::can_write()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable();

    _requested_open_region_bottom = nullptr;
    _requested_open_region_top = nullptr;
    _requested_closed_region_bottom = nullptr;
    _requested_closed_region_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(UseG1GC, "implementation limitation");
    guarantee(MIN_GC_REGION_ALIGNMENT <= /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

// For the time being, always support two regions (to be strictly compatible with existing G1
// mapping code). We might eventually use a single region (JDK-8298048).
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
                              GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                              GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
  assert(HeapShared::can_write(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address_for_regions(closed_regions, open_regions);
  relocate_embedded_oops(roots, closed_bitmaps, open_bitmaps);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_regions(oop o) {
  assert(_requested_open_region_bottom != nullptr, "do not call before this is initialized");
  assert(_requested_closed_region_bottom != nullptr, "do not call before this is initialized");

  address a = cast_from_oop<address>(o);
  return (_requested_open_region_bottom <= a && a < _requested_open_region_top) ||
         (_requested_closed_region_bottom <= a && a < _requested_closed_region_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_open_region_bottom + offset);
  assert(is_in_requested_regions(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(DumpSharedSpaces, "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    return *p;
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_open_region_bottom + buffered_address_to_offset(buffered_addr);
}

oop ArchiveHeapWriter::heap_roots_requested_address() {
  return requested_obj_from_buffer_offset(_heap_roots_bottom);
}

address ArchiveHeapWriter::heap_region_requested_bottom(int heap_region_idx) {
  assert(_buffer != nullptr, "must be initialized");
  switch (heap_region_idx) {
  case MetaspaceShared::first_closed_heap_region:
    return _requested_closed_region_bottom;
  case MetaspaceShared::first_open_heap_region:
    return _requested_open_region_bottom;
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _open_bottom = _buffer_top = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots != nullptr ? roots->length() : 0;
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_top = _buffer_top + byte_size;
  ensure_buffer_space(new_top);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      * arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      * arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_bottom = _buffer_top;
  _buffer_top = new_top;
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  copy_source_objs_to_buffer_by_region(/*copy_open_region=*/true);
  copy_roots_to_buffer(roots);
  _open_top = _buffer_top;

  // Align the closed region to the next G1 region
  _buffer_top = _closed_bottom = align_up(_buffer_top, HeapRegion::GrainBytes);
  copy_source_objs_to_buffer_by_region(/*copy_open_region=*/false);
  _closed_top = _buffer_top;

  log_info(cds, heap)("Size of open region = " SIZE_FORMAT " bytes", _open_top - _open_bottom);
  log_info(cds, heap)("Size of closed region = " SIZE_FORMAT " bytes", _closed_top - _closed_bottom);
}

void ArchiveHeapWriter::copy_source_objs_to_buffer_by_region(bool copy_open_region) {
  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    if (info->in_open_region() == copy_open_region) {
      // For region-based collectors such as G1, we need to make sure that we don't have
      // an object that can possibly span across two regions.
      size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
      info->set_buffer_offset(buffer_offset);

      _buffer_offset_to_source_obj_table->put(buffer_offset, src_obj);
    }
  }
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length --) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
  memset(mem, 0, fill_bytes);
  oopDesc::set_mark(mem, markWord::prototype());
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  cast_to_oop(mem)->set_narrow_klass(nk);
  arrayOopDesc::set_length(mem, array_length);
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_top = _buffer_top + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_top;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
                        array_length, fill_bytes, _buffer_top);
    init_filler_array_at_buffer_top(array_length, fill_bytes);

    _buffer_top = filler_end;
  }
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  maybe_fill_gc_region_gap(byte_size);

  size_t new_top = _buffer_top + byte_size;
  assert(new_top > _buffer_top, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_top);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_top);
  assert(is_object_aligned(_buffer_top), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  size_t buffered_obj_offset = _buffer_top;
  _buffer_top = new_top;

  return buffered_obj_offset;
}

void ArchiveHeapWriter::set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
                                                          GrowableArray<MemRegion>* open_regions) {
  assert(closed_regions->length() == 0, "must be");
  assert(open_regions->length() == 0, "must be");

  assert(UseG1GC, "must be");
  address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
  log_info(cds, heap)("Heap end = %p", heap_end);

  size_t closed_region_byte_size = _closed_top - _closed_bottom;
  size_t open_region_byte_size = _open_top - _open_bottom;
  assert(closed_region_byte_size > 0, "must archive at least one object for the closed region!");
  assert(open_region_byte_size > 0, "must archive at least one object for the open region!");

  // The following two asserts are ensured by copy_source_objs_to_buffer_by_region().
  assert(is_aligned(_closed_bottom, HeapRegion::GrainBytes), "sanity");
  assert(is_aligned(_open_bottom, HeapRegion::GrainBytes), "sanity");

  _requested_closed_region_bottom = align_down(heap_end - closed_region_byte_size, HeapRegion::GrainBytes);
  _requested_open_region_bottom = _requested_closed_region_bottom - (_closed_bottom - _open_bottom);

  assert(is_aligned(_requested_closed_region_bottom, HeapRegion::GrainBytes), "sanity");
  assert(is_aligned(_requested_open_region_bottom, HeapRegion::GrainBytes), "sanity");

  _requested_open_region_top = _requested_open_region_bottom + (_open_top - _open_bottom);
  _requested_closed_region_top = _requested_closed_region_bottom + (_closed_top - _closed_bottom);

  assert(_requested_open_region_top <= _requested_closed_region_bottom, "no overlap");

  closed_regions->append(MemRegion(offset_to_buffered_address<HeapWord*>(_closed_bottom),
                                   offset_to_buffered_address<HeapWord*>(_closed_top)));
  open_regions->append( MemRegion(offset_to_buffered_address<HeapWord*>(_open_bottom),
                                  offset_to_buffered_address<HeapWord*>(_open_top)));
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_regions(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_open_region_bottom, "must be");
  size_t offset = addr - _requested_open_region_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_regions(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  // Make heap content deterministic. See comments inside HeapShared::to_requested_address.
  *buffered_addr = HeapShared::to_requested_address(requested_obj);
}

void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  // Note: HeapShared::to_requested_address() is not necessary because
  // the heap always starts at a deterministic address with UseCompressedOops==true.
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (!CompressedOops::is_null(source_referent)) {
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer);
  }
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  ResourceBitMap* oopmap;
  address requested_region_bottom;

  if (request_p >= (T*)_requested_closed_region_bottom) {
    assert(request_p < (T*)_requested_closed_region_top, "sanity");
    oopmap = _closed_oopmap;
    requested_region_bottom = _requested_closed_region_bottom;
  } else {
    assert(request_p >= (T*)_requested_open_region_bottom, "sanity");
    assert(request_p < (T*)_requested_open_region_top, "sanity");
    oopmap = _open_oopmap;
    requested_region_bottom = _requested_open_region_bottom;
  }

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  fake_oop->set_narrow_klass(nk);

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    int src_hash = src_obj->identity_hash();
    fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset));
}

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj) :
    _src_obj(src_obj), _buffered_obj(buffered_obj) {}

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset));
  }
};

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                                               GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t closed_region_byte_size = _closed_top - _closed_bottom;
  size_t open_region_byte_size = _open_top - _open_bottom;
  ResourceBitMap closed_oopmap(closed_region_byte_size / oopmap_unit);
  ResourceBitMap open_oopmap  (open_region_byte_size / oopmap_unit);

  _closed_oopmap = &closed_oopmap;
  _open_oopmap = &open_oopmap;

  auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) {
    oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());

    address buffered_obj = offset_to_buffered_address<address>(info.buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj);

    src_obj->oop_iterate(&relocator);
  };
  HeapShared::archived_object_cache()->iterate_all(iterator);

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom);
  update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj());
  int length = roots != nullptr ? roots->length() : 0;
  for (int i = 0; i < length; i++) {
    if (UseCompressedOops) {
      relocate_root_at<narrowOop>(requested_roots, i);
    } else {
      relocate_root_at<oop>(requested_roots, i);
    }
  }

  closed_bitmaps->append(make_bitmap_info(&closed_oopmap, /*is_open=*/false, /*is_oopmap=*/true));
  open_bitmaps ->append(make_bitmap_info(&open_oopmap, /*is_open=*/false, /*is_oopmap=*/true));

  closed_bitmaps->append(compute_ptrmap(/*is_open=*/false));
  open_bitmaps ->append(compute_ptrmap(/*is_open=*/true));

  _closed_oopmap = nullptr;
  _open_oopmap = nullptr;
}

ArchiveHeapBitmapInfo ArchiveHeapWriter::make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap) {
  size_t size_in_bits = bitmap->size();
  size_t size_in_bytes;
  uintptr_t* buffer;

  if (size_in_bits > 0) {
    size_in_bytes = bitmap->size_in_bytes();
    buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
    bitmap->write_to(buffer, size_in_bytes);
  } else {
    size_in_bytes = 0;
    buffer = nullptr;
  }

  log_info(cds, heap)("%s @ " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for %s heap region",
                      is_oopmap ? "Oopmap" : "Ptrmap",
                      p2i(buffer), size_in_bytes,
                      is_open ? "open" : "closed");

  ArchiveHeapBitmapInfo info;
  info._map = (address)buffer;
  info._size_in_bits = size_in_bits;
  info._size_in_bytes = size_in_bytes;

  return info;
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
  }
}

ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) (is_open ? _requested_open_region_bottom : _requested_closed_region_bottom);
  Metadata** top = (Metadata**) (is_open ? _requested_open_region_top : _requested_closed_region_top); // exclusive
  ResourceBitMap ptrmap(top - bottom);

  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
    if (p->in_open_region() == is_open) {
      // requested_field_addr = the address of this field in the requested space
      oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
      Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
      assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

      // Mark this field in the bitmap
      BitMap::idx_t idx = requested_field_addr - bottom;
      ptrmap.set_bit(idx);
      num_non_null_ptrs ++;

      // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
      // this address if the RO/RW regions are mapped at the default location).

      Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
      Metadata* native_ptr = *buffered_field_addr;
      assert(native_ptr != nullptr, "sanity");

      address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
      address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
      *buffered_field_addr = (Metadata*)requested_native_ptr;
    }
  }

  log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for %s heap region",
                      num_non_null_ptrs, is_open ? "open" : "closed");

  if (num_non_null_ptrs == 0) {
    ResourceBitMap empty;
    return make_bitmap_info(&empty, is_open, /*is_oopmap=*/ false);
  } else {
    return make_bitmap_info(&ptrmap, is_open, /*is_oopmap=*/ false);
  }
}

#endif // INCLUDE_CDS_JAVA_HEAP
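As a rough, self-contained illustration of the gap-filling rule enforced by maybe_fill_gc_region_gap() and copy_one_source_obj_to_buffer() above: no buffered object (plus the smallest possible filler array) may straddle a MIN_GC_REGION_ALIGNMENT boundary. The sketch below uses standard C++ only; the concrete numbers are made up and only the alignment arithmetic mirrors the patch.

#include <cstddef>
#include <cstdio>

constexpr size_t MIN_GC_REGION_ALIGNMENT = 1024 * 1024; // 1M, as in the patch
constexpr size_t align_down(size_t v, size_t a) { return v - (v % a); }

int main() {
  size_t buffer_top = 0x0ff000;  // hypothetical current top offset in the buffer
  size_t required   = 8 * 1024;  // hypothetical size of the next object to copy
  size_t min_filler = 16;        // hypothetical size of a zero-length filler array

  size_t new_top = buffer_top + required + min_filler;
  bool needs_filler = align_down(buffer_top, MIN_GC_REGION_ALIGNMENT) !=
                      align_down(new_top, MIN_GC_REGION_ALIGNMENT);
  // When true, the writer pads up to the next 1M boundary with a dummy object array
  // so that the next object starts exactly at a minimal GC region boundary.
  printf("needs_filler = %d\n", needs_filler);
  return 0;
}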
src/hotspot/share/cds/archiveHeapWriter.hpp (new file, 202 lines)

@@ -0,0 +1,202 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#define SHARE_CDS_ARCHIVEHEAPWRITER_HPP

#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"

#if INCLUDE_CDS_JAVA_HEAP

struct ArchiveHeapBitmapInfo;
class MemRegion;

class ArchiveHeapWriter : AllStatic {
  class EmbeddedOopRelocator;
  struct NativePointerInfo {
    oop _src_obj;
    int _field_offset;
  };

  // The minimum region size of all collectors that are supported by CDS in
  // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
  // depends on -Xmx, but can never be smaller than 1 * M.
  // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
  static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
  // "source" vs "buffered" vs "requested"
  //
  // [1] HeapShared::archive_objects() identifies all of the oops that need to be stored
  //     into the CDS archive. These are entered into HeapShared::archived_object_cache().
  //     These are called "source objects".
  //
  // [2] ArchiveHeapWriter::write() copies all source objects into ArchiveHeapWriter::_buffer,
  //     which is a GrowableArray that sits outside of the valid heap range. Therefore
  //     we avoid using the addresses of these copies as oops. They are usually
  //     called "buffered_addr" in the code (of the type "address").
  //
  // [3] Each archived object has a "requested address" -- at run time, if the object
  //     can be mapped at this address, we can avoid relocation.
  //
  // Note: the design and convention is the same as for the archiving of Metaspace objects.
  // See archiveBuilder.hpp.
  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The exclusive top of the last object that has been copied into this->_buffer.
  static size_t _buffer_top;

  // The bounds of the open region inside this->_buffer.
  static size_t _open_bottom;   // inclusive
  static size_t _open_top;      // exclusive

  // The bounds of the closed region inside this->_buffer.
  static size_t _closed_bottom; // inclusive
  static size_t _closed_top;    // exclusive

  // The bottom of the copy of HeapShared::roots() inside this->_buffer.
  static size_t _heap_roots_bottom;
  static size_t _heap_roots_word_size;

  static address _requested_open_region_bottom;
  static address _requested_open_region_top;
  static address _requested_closed_region_bottom;
  static address _requested_closed_region_top;

  static ResourceBitMap* _closed_oopmap;
  static ResourceBitMap* _open_oopmap;

  static ArchiveHeapBitmapInfo _closed_oopmap_info;
  static ArchiveHeapBitmapInfo _open_oopmap_info;

  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;

  typedef ResourceHashtable<size_t, oop,
      36137, // prime number
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;
  static void allocate_buffer();
  static void ensure_buffer_space(size_t min_bytes);

  // Both Java bytearray and GrowableArray use int indices and lengths. Do a safe typecast with range check
  static int to_array_index(size_t i) {
    assert(i <= (size_t)max_jint, "must be");
    return (int)i;
  }
  static int to_array_length(size_t n) {
    return to_array_index(n);
  }

  template <typename T> static T offset_to_buffered_address(size_t offset) {
    return (T)(_buffer->adr_at(to_array_index(offset)));
  }

  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  static address buffer_top() {
    return buffer_bottom() + _buffer_top;
  }

  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }

  static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer_by_region(bool copy_open_region);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static void init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);

  static void set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
                                                GrowableArray<MemRegion>* open_regions);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                     GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                                     GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
  static ArchiveHeapBitmapInfo compute_ptrmap(bool is_open);
  static ArchiveHeapBitmapInfo make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap);
  static bool is_in_requested_regions(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer);
  template <typename T> static void mark_oop_pointer(T* buffered_addr);
  template <typename T> static void relocate_root_at(oop requested_roots, int index);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*,
                    GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
                    GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                    GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
  static address heap_region_requested_bottom(int heap_region_idx);
  static oop heap_roots_requested_address();
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_bottom);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }

  static void mark_native_pointer(oop src_obj, int offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP
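The "source" vs "buffered" vs "requested" comment in archiveHeapWriter.hpp above is the central invariant of the new writer: it never relies on where objects sit in the real Java heap, it only tracks offsets into its own C-heap buffer and adds those offsets to the requested region base. A minimal, self-contained sketch of that translation (plain C++, hypothetical names, no HotSpot types):

#include <cstddef>
#include <cstdint>

using address = uint8_t*;

struct AddressSpacesSketch {
  address buffer_bottom;                 // bottom of the dump-time C-heap buffer
  address requested_open_region_bottom;  // where the open region should land at runtime

  // Mirrors buffered_addr_to_requested_addr(): same offset, different base.
  address buffered_to_requested(address buffered_addr) const {
    size_t offset = buffered_addr - buffer_bottom;
    return requested_open_region_bottom + offset;
  }

  // Mirrors requested_addr_to_buffered_addr(): invert the translation.
  address requested_to_buffered(address requested_addr) const {
    size_t offset = requested_addr - requested_open_region_bottom;
    return buffer_bottom + offset;
  }
};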
src/hotspot/share/cds/cdsHeapVerifier.cpp

@@ -276,10 +276,10 @@ void CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj) {

int CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj, oop orig_field, HeapShared::CachedOopInfo* info) {
  int level = 0;
  if (info->_referrer != nullptr) {
    HeapShared::CachedOopInfo* ref = HeapShared::archived_object_cache()->get(info->_referrer);
  if (info->orig_referrer() != nullptr) {
    HeapShared::CachedOopInfo* ref = HeapShared::archived_object_cache()->get(info->orig_referrer());
    assert(ref != nullptr, "sanity");
    level = trace_to_root(st, info->_referrer, orig_obj, ref) + 1;
    level = trace_to_root(st, info->orig_referrer(), orig_obj, ref) + 1;
  } else if (java_lang_String::is_instance(orig_obj)) {
    st->print_cr("[%2d] (shared string table)", level++);
  }
src/hotspot/share/cds/filemap.cpp

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "cds/cds_globals.hpp"
#include "cds/dynamicArchive.hpp"

@@ -1632,16 +1633,19 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
    // This is an unused region (e.g., a heap region when !INCLUDE_CDS_JAVA_HEAP)
    requested_base = nullptr;
  } else if (HeapShared::is_heap_region(region)) {
    assert(HeapShared::can_write(), "sanity");
#if INCLUDE_CDS_JAVA_HEAP
    assert(!DynamicDumpSharedSpaces, "must be");
    requested_base = base;
    requested_base = (char*)ArchiveHeapWriter::heap_region_requested_bottom(region);
    if (UseCompressedOops) {
      mapping_offset = (size_t)((address)base - CompressedOops::base());
      mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
      assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
    } else {
#if INCLUDE_G1GC
      mapping_offset = requested_base - (char*)G1CollectedHeap::heap()->reserved().start();
#endif
    }
#endif // INCLUDE_CDS_JAVA_HEAP
  } else {
    char* requested_SharedBaseAddress = (char*)MetaspaceShared::requested_base_address();
    requested_base = ArchiveBuilder::current()->to_requested(base);
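A small, self-contained sketch of the mapping_offset computation in the UseCompressedOops branch above (made-up addresses; only the arithmetic and the alignment check mirror the patch):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t narrow_oop_base = 0;           // hypothetical CompressedOops::base()
  uintptr_t requested_base  = 0x7ff00000;  // hypothetical requested bottom of a heap region
  int shift = 3;                           // hypothetical CompressedOops::shift()

  uintptr_t mapping_offset = requested_base - narrow_oop_base;
  // The requested bottom must be expressible as a shifted narrow-oop offset:
  assert(((mapping_offset >> shift) << shift) == mapping_offset);
  return 0;
}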
src/hotspot/share/cds/heapShared.cpp

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"

@@ -61,7 +62,6 @@
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP
@@ -82,8 +82,8 @@ struct ArchivableStaticFieldInfo {
};

bool HeapShared::_disable_writing = false;
bool HeapShared::_copying_open_region_objects = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];

@@ -143,14 +143,6 @@ OopHandle HeapShared::_roots;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;

#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
  assert(HeapShared::can_write(), "must be");
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
  return Universe::heap()->is_archived_object(p);
}
#endif

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
@@ -220,16 +212,10 @@ void HeapShared::reset_archived_object_states(TRAPS) {
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = nullptr;
oop HeapShared::find_archived_heap_object(oop obj) {

bool HeapShared::has_been_archived(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != nullptr) {
    return p->_obj;
  } else {
    return nullptr;
  }
  return archived_object_cache()->get(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
@@ -263,19 +249,13 @@ objArrayOop HeapShared::roots() {
// Returns an objArray that contains all the roots of the archived objects
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != nullptr, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  assert(!DumpSharedSpaces && UseSharedSpaces, "runtime only");
  assert(!_roots.is_empty(), "must have loaded shared heap");
  oop result = roots()->obj_at(index);
  if (clear) {
    clear_root(index);
  }
  return result;
}

void HeapShared::clear_root(int index) {
@@ -290,71 +270,47 @@ void HeapShared::clear_root(int index) {
  }
}

bool HeapShared::is_too_large_to_archive(oop o) {
  // TODO: To make the CDS heap mappable for all collectors, this function should
  // reject objects that may be too large for *any* collector.
  assert(UseG1GC, "implementation limitation");
  size_t sz = align_up(o->size() * HeapWordSize, ObjectAlignmentInBytes);
  size_t max = /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize;
  return (sz > max);
}

oop HeapShared::archive_object(oop obj) {
bool HeapShared::archive_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");

  oop ao = find_archived_heap_object(obj);
  if (ao != nullptr) {
    // already archived
    return ao;
  if (has_been_archived(obj)) {
    return true;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return nullptr;
  }
                         p2i(obj), obj->size());
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != nullptr) {
    count_allocation(len);
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");
    CachedOopInfo info = make_cached_oop_info();
    archived_object_cache()->put(obj, info);
    mark_native_pointers(obj);

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != nullptr) {
      _original_object_table->put(archived_oop, obj);
    }
    mark_native_pointers(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " : %s",
                           p2i(obj), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
                   SIZE_FORMAT "M", MaxHeapSize/M);
    os::_exit(-1);

    if (java_lang_Module::is_instance(obj)) {
      if (Modules::check_module_oop(obj)) {
        Modules::update_oops_in_archived_module(obj, append_root(obj));
      }
      java_lang_Module::set_module_entry(obj, nullptr);
    } else if (java_lang_ClassLoader::is_instance(obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(obj == SystemDictionary::java_platform_loader() ||
                obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data(obj) == nullptr, "must be");
      java_lang_ClassLoader::release_set_loader_data(obj, nullptr);
    }

    return true;
  }
  return archived_oop;
}

class KlassToOopHandleTable: public ResourceHashtable<Klass*, OopHandle,
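To summarize the behavioral change in the hunk above (my paraphrase, not code from the patch): HeapShared::archive_object() no longer allocates a copy in a G1 archive region; it only records the source object, and ArchiveHeapWriter performs the copy into an off-heap buffer at the end of dumping. A comment-only C++ sketch of the new dump-time flow, using the function names introduced by this commit:

// Sketch only; control flow is simplified.
inline void dump_time_flow_sketch() {
  // Phase 1: while walking the object graph (HeapShared::archive_object):
  //   - reject objects for which ArchiveHeapWriter::is_too_large_to_archive() is true
  //   - count_allocation(obj->size()) and ArchiveHeapWriter::add_source_obj(obj)
  //   - archived_object_cache()->put(obj, make_cached_oop_info())
  //   No heap allocation and no copying happen here.
  //
  // Phase 2: ArchiveHeapWriter::write(roots, ...):
  //   - allocate_buffer()                       // C-heap GrowableArray outside the Java heap
  //   - copy_source_objs_to_buffer(roots)       // open objects, the roots array, then closed objects
  //   - set_requested_address_for_regions(...)  // choose the runtime mapping addresses
  //   - relocate_embedded_oops(...)             // rewrite oops and native pointers in the buffer
}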
@@ -424,14 +380,14 @@ void HeapShared::archive_java_mirrors() {
    if (!is_reference_type(bt)) {
      oop m = _scratch_basic_type_mirrors[i].resolve();
      assert(m != nullptr, "sanity");
      oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
      assert(archived_m != nullptr, "sanity");
      bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
      assert(success, "sanity");

      log_trace(cds, heap, mirror)(
        "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
        type2name(bt), p2i(m), p2i(archived_m));
        "Archived %s mirror object from " PTR_FORMAT,
        type2name(bt), p2i(m));

      Universe::set_archived_basic_type_mirror_index(bt, append_root(archived_m));
      Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
    }
  }

@@ -442,23 +398,23 @@ void HeapShared::archive_java_mirrors() {
    oop m = scratch_java_mirror(orig_k);
    if (m != nullptr) {
      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
      oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
      guarantee(archived_m != nullptr, "scratch mirrors should not point to any unarchivable objects");
      buffered_k->set_archived_java_mirror(append_root(archived_m));
      bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
      guarantee(success, "scratch mirrors should not point to any unarchivable objects");
      buffered_k->set_archived_java_mirror(append_root(m));
      ResourceMark rm;
      log_trace(cds, heap, mirror)(
        "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
        buffered_k->external_name(), p2i(m), p2i(archived_m));
        "Archived %s mirror object from " PTR_FORMAT,
        buffered_k->external_name(), p2i(m));

      // archive the resolved_references array
      if (buffered_k->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(buffered_k);
        oop rr = ik->constants()->prepare_resolved_references_for_archiving();
        if (rr != nullptr && !is_too_large_to_archive(rr)) {
          oop archived_obj = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr,
                                                                        /*is_closed_archive=*/false);
          assert(archived_obj != nullptr, "already checked not too large to archive");
          int root_index = append_root(archived_obj);
        if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
          bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr,
                                                                    /*is_closed_archive=*/false);
          assert(success, "must be");
          int root_index = append_root(rr);
          ik->constants()->cache()->set_archived_references(root_index);
        }
      }
@ -468,29 +424,10 @@ void HeapShared::archive_java_mirrors() {
|
||||
delete_seen_objects_table();
|
||||
}
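
For context: in the new code above, dump-time callers record a root index (append_root(m) on the source object) rather than a pointer to an archived copy; the concrete oop is only resolved after the archive heap is mapped at runtime. A minimal standalone sketch of that index indirection, with all names hypothetical (none of them are HotSpot's):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    using Obj = const void*;

    // Dump time: collect pending roots and hand out stable indices.
    std::vector<Obj> pending_roots;

    int append_root(Obj o) {
      pending_roots.push_back(o);
      return static_cast<int>(pending_roots.size()) - 1;
    }

    // "Run time" (modeled): an index recorded at dump time resolves against the
    // mapped root array to the actual object.
    Obj get_root(const std::vector<Obj>& mapped_roots, int index) {
      return mapped_roots.at(static_cast<size_t>(index));
    }

    int main() {
      int mirror = 0;
      int idx = append_root(&mirror);          // what set_archived_java_mirror() would store
      std::vector<Obj> mapped = pending_roots; // pretend mapping preserved the layout
      std::printf("root %d -> %p\n", idx, get_root(mapped, idx));
      return 0;
    }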
|
||||
|
||||
void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
    mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
  }
}

void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
  Metadata* ptr = archived_obj->metadata_field_acquire(offset);
  if (ptr != nullptr) {
    // Set the native pointer to the requested address (at runtime, if the metadata
    // is mapped at the default location, it will be at this address).
    address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
    address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
    archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);

    // Remember this pointer. At runtime, if the metadata is mapped at a non-default
    // location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
    _native_pointers->append(archived_obj->field_addr<Metadata*>(offset));

    log_debug(cds, heap, mirror)(
      "Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
      offset, p2i(ptr), p2i(requested_addr));
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  }
}
|
||||
|
||||
@ -517,6 +454,7 @@ void HeapShared::check_enum_obj(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
assert(level > 1, "must never be called at the first (outermost) level");
|
||||
Klass* k = orig_obj->klass();
|
||||
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
|
||||
if (!k->is_instance_klass()) {
|
||||
@ -544,11 +482,12 @@ void HeapShared::check_enum_obj(int level,
|
||||
guarantee(false, "static field %s::%s is of the wrong type",
|
||||
ik->external_name(), fd.name()->as_C_string());
|
||||
}
|
||||
oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
|
||||
int root_index = append_root(archived_oop_field);
|
||||
log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
|
||||
bool success = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
|
||||
assert(success, "VM should have exited with unarchivable objects for _level > 1");
|
||||
int root_index = append_root(oop_field);
|
||||
log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT ")",
|
||||
root_index, ik->external_name(), fd.name()->as_C_string(),
|
||||
p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
|
||||
p2i((oopDesc*)oop_field));
|
||||
SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
|
||||
}
|
||||
}
|
||||
@ -582,37 +521,17 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void HeapShared::run_full_gc_in_vm_thread() {
|
||||
if (HeapShared::can_write()) {
|
||||
// Avoid fragmentation while archiving heap objects.
|
||||
// We do this inside a safepoint, so that no further allocation can happen after GC
|
||||
// has finished.
|
||||
if (GCLocker::is_active()) {
|
||||
// Just checking for safety ...
|
||||
// This should not happen during -Xshare:dump. If you see this, probably the Java core lib
|
||||
// has been modified such that JNI code is executed in some clean up threads after
|
||||
// we have finished class loading.
|
||||
log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
|
||||
} else {
|
||||
log_info(cds)("Run GC ...");
|
||||
Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
|
||||
log_info(cds)("Run GC done");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
GrowableArray<MemRegion>* open_regions) {
|
||||
|
||||
G1HeapVerifier::verify_ready_for_archiving();
|
||||
|
||||
GrowableArray<MemRegion>* open_regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
|
||||
{
|
||||
NoSafepointVerifier nsv;
|
||||
|
||||
_default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);
|
||||
|
||||
// Cache for recording where the archived objects are copied to
|
||||
create_archived_object_cache(log_is_enabled(Info, cds, map));
|
||||
create_archived_object_cache();
|
||||
|
||||
log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
|
||||
UseCompressedOops ? p2i(CompressedOops::begin()) :
|
||||
@ -620,16 +539,18 @@ void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
UseCompressedOops ? p2i(CompressedOops::end()) :
|
||||
p2i((address)G1CollectedHeap::heap()->reserved().end()));
|
||||
log_info(cds)("Dumping objects to closed archive heap region ...");
|
||||
copy_closed_objects(closed_regions);
|
||||
copy_closed_objects();
|
||||
|
||||
_copying_open_region_objects = true;
|
||||
|
||||
log_info(cds)("Dumping objects to open archive heap region ...");
|
||||
copy_open_objects(open_regions);
|
||||
copy_open_objects();
|
||||
|
||||
CDSHeapVerifier::verify();
|
||||
check_default_subgraph_classes();
|
||||
}
|
||||
|
||||
G1HeapVerifier::verify_archive_regions();
|
||||
ArchiveHeapWriter::write(_pending_roots, closed_regions, open_regions, closed_bitmaps, open_bitmaps);
|
||||
StringTable::write_shared_table(_dumped_interned_strings);
|
||||
}
|
||||
|
||||
@ -638,14 +559,13 @@ void HeapShared::copy_interned_strings() {
|
||||
|
||||
auto copier = [&] (oop s, bool value_ignored) {
|
||||
assert(s != nullptr, "sanity");
|
||||
typeArrayOop value = java_lang_String::value_no_keepalive(s);
|
||||
if (!HeapShared::is_too_large_to_archive(value)) {
|
||||
oop archived_s = archive_reachable_objects_from(1, _default_subgraph_info,
|
||||
s, /*is_closed_archive=*/true);
|
||||
assert(archived_s != nullptr, "already checked not too large to archive");
|
||||
if (!ArchiveHeapWriter::is_string_too_large_to_archive(s)) {
|
||||
bool success = archive_reachable_objects_from(1, _default_subgraph_info,
|
||||
s, /*is_closed_archive=*/true);
|
||||
assert(success, "must be");
|
||||
// Prevent string deduplication from changing the value field to
|
||||
// something not in the archive.
|
||||
java_lang_String::set_deduplication_forbidden(archived_s);
|
||||
java_lang_String::set_deduplication_forbidden(s);
|
||||
}
|
||||
};
|
||||
_dumped_interned_strings->iterate_all(copier);
|
||||
@ -653,27 +573,20 @@ void HeapShared::copy_interned_strings() {
|
||||
delete_seen_objects_table();
|
||||
}
|
||||
|
||||
void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
|
||||
void HeapShared::copy_closed_objects() {
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range();
|
||||
|
||||
// Archive interned string objects
|
||||
copy_interned_strings();
|
||||
|
||||
archive_object_subgraphs(closed_archive_subgraph_entry_fields,
|
||||
true /* is_closed_archive */,
|
||||
false /* is_full_module_graph */);
|
||||
|
||||
G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
|
||||
os::vm_allocation_granularity());
|
||||
}
|
||||
|
||||
void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
|
||||
void HeapShared::copy_open_objects() {
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
|
||||
|
||||
archive_java_mirrors();
|
||||
|
||||
archive_object_subgraphs(open_archive_subgraph_entry_fields,
|
||||
@ -685,43 +598,6 @@ void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
|
||||
true /* is_full_module_graph */);
|
||||
Modules::verify_archived_modules();
|
||||
}
|
||||
|
||||
copy_roots();
|
||||
|
||||
G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
|
||||
os::vm_allocation_granularity());
|
||||
}
|
||||
|
||||
// Copy _pending_archive_roots into an objArray
void HeapShared::copy_roots() {
  // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
  // objects in this array are discovered during HeapShared::archive_objects(). For example,
  // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
  // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
  // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
  // Instead, we have to roll our own alloc/copy routine here.
  int length = _pending_roots != nullptr ? _pending_roots->length() : 0;
  size_t size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
  count_allocation(roots()->size());
}
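
The removed copy_roots() above hand-rolled an objArray allocation inside a G1 archive region. After this change the equivalent step is performed by ArchiveHeapWriter, which copies the pending roots into its own buffer; the following is a rough standalone model of that gather-roots-into-a-buffer step, under assumed, simplified types (nothing here is the actual ArchiveHeapWriter API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical model: 'Handle' stands in for an oop, 'buffer' for the writer's
    // scratch image, and the returned offset for where the root array begins.
    using Handle = const void*;

    size_t copy_roots_into_buffer(const std::vector<Handle>& pending_roots,
                                  std::vector<Handle>& buffer) {
      size_t offset = buffer.size();                                    // root array starts here
      buffer.push_back(reinterpret_cast<Handle>(pending_roots.size())); // length "header" slot
      for (Handle h : pending_roots) {
        buffer.push_back(h);                                            // element slots, relocated later
      }
      return offset;
    }

    int main() {
      int a = 1, b = 2;
      std::vector<Handle> roots = {&a, &b};
      std::vector<Handle> buffer;
      size_t off = copy_roots_into_buffer(roots, buffer);
      std::printf("roots at buffer offset %zu, length %zu\n", off, roots.size());
      return 0;
    }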
|
||||
|
||||
//
|
||||
@ -985,7 +861,9 @@ void HeapShared::serialize_root(SerializeClosure* soc) {
|
||||
}
|
||||
} else {
|
||||
// writing
|
||||
roots_oop = roots();
|
||||
if (HeapShared::can_write()) {
|
||||
roots_oop = ArchiveHeapWriter::heap_roots_requested_address();
|
||||
}
|
||||
soc->do_oop(&roots_oop); // write to archive
|
||||
}
|
||||
}
|
||||
@ -1223,8 +1101,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
bool _is_closed_archive;
|
||||
bool _record_klasses_only;
|
||||
KlassSubGraphInfo* _subgraph_info;
|
||||
oop _orig_referencing_obj;
|
||||
oop _archived_referencing_obj;
|
||||
oop _referencing_obj;
|
||||
|
||||
// The following are for maintaining a stack for determining
|
||||
// CachedOopInfo::_referrer
|
||||
@ -1235,11 +1112,11 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
bool is_closed_archive,
|
||||
bool record_klasses_only,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig, oop archived) :
|
||||
oop orig) :
|
||||
_level(level), _is_closed_archive(is_closed_archive),
|
||||
_record_klasses_only(record_klasses_only),
|
||||
_subgraph_info(subgraph_info),
|
||||
_orig_referencing_obj(orig), _archived_referencing_obj(archived) {
|
||||
_referencing_obj(orig) {
|
||||
_last = _current;
|
||||
_current = this;
|
||||
}
|
||||
@ -1253,16 +1130,12 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
oop obj = RawAccess<>::oop_load(p);
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
assert(!HeapShared::is_archived_object_during_dumptime(obj),
|
||||
"original objects must not point to archived objects");
|
||||
|
||||
size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
|
||||
T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
|
||||
size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
|
||||
|
||||
if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
|
||||
ResourceMark rm;
|
||||
log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
|
||||
_orig_referencing_obj->klass()->external_name(), field_delta,
|
||||
_referencing_obj->klass()->external_name(), field_delta,
|
||||
p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
|
||||
if (log_is_enabled(Trace, cds, heap)) {
|
||||
LogTarget(Trace, cds, heap) log;
|
||||
@ -1271,37 +1144,24 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
}
|
||||
}
|
||||
|
||||
oop archived = HeapShared::archive_reachable_objects_from(
|
||||
bool success = HeapShared::archive_reachable_objects_from(
|
||||
_level + 1, _subgraph_info, obj, _is_closed_archive);
|
||||
assert(archived != nullptr, "VM should have exited with unarchivable objects for _level > 1");
|
||||
assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
|
||||
|
||||
if (!_record_klasses_only) {
|
||||
// Update the reference in the archived copy of the referencing object.
|
||||
log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
|
||||
_level, p2i(new_p), p2i(obj), p2i(archived));
|
||||
RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
|
||||
}
|
||||
assert(success, "VM should have exited with unarchivable objects for _level > 1");
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static WalkOopAndArchiveClosure* current() { return _current; }
|
||||
oop orig_referencing_obj() { return _orig_referencing_obj; }
|
||||
oop referencing_obj() { return _referencing_obj; }
|
||||
KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
|
||||
};
|
||||
|
||||
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
|
||||
|
||||
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
  CachedOopInfo info;
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info() {
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();

  info._subgraph_info = (walker == nullptr) ? nullptr : walker->subgraph_info();
  info._referrer = (walker == nullptr) ? nullptr : walker->orig_referencing_obj();
  info._obj = orig_obj;

  return info;
  oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
  return CachedOopInfo(referrer, _copying_open_region_objects);
}
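
make_cached_oop_info() relies on WalkOopAndArchiveClosure keeping a chain of active walkers through its static _current field, so the innermost walker identifies the referrer of the object being recorded. A self-contained sketch of that constructor/destructor-maintained stack idiom (illustrative names, not HotSpot's):

    #include <cstdio>

    // Each live Walker pushes itself on construction and pops on destruction,
    // so Walker::current() always names the innermost walker on this thread.
    class Walker {
      static Walker* _current;
      Walker* _last;
      const char* _name;
    public:
      explicit Walker(const char* name) : _last(_current), _name(name) { _current = this; }
      ~Walker() { _current = _last; }
      static Walker* current() { return _current; }
      const char* name() const { return _name; }
    };

    Walker* Walker::_current = nullptr;

    void record_referrer() {
      Walker* w = Walker::current();
      std::printf("referrer = %s\n", w ? w->name() : "(none)");
    }

    int main() {
      record_referrer();            // (none)
      Walker outer("outer");
      {
        Walker inner("inner");
        record_referrer();          // inner
      }
      record_referrer();            // outer
      return 0;
    }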
|
||||
|
||||
void HeapShared::check_closed_region_object(InstanceKlass* k) {
|
||||
@ -1324,12 +1184,11 @@ void HeapShared::check_closed_region_object(InstanceKlass* k) {
|
||||
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
|
||||
// trace all objects that are reachable from it, and make sure these objects are archived.
|
||||
// (3) Record the klasses of all orig_obj and all reachable objects.
|
||||
oop HeapShared::archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
bool HeapShared::archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
assert(orig_obj != nullptr, "must be");
|
||||
assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
|
||||
|
||||
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
|
||||
// This object has injected fields that cannot be supported easily, so we disallow them for now.
|
||||
@ -1350,25 +1209,18 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
os::_exit(1);
|
||||
}
|
||||
|
||||
oop archived_obj = find_archived_heap_object(orig_obj);
|
||||
if (java_lang_String::is_instance(orig_obj) && archived_obj != nullptr) {
|
||||
// To save time, don't walk strings that are already archived. They just contain
|
||||
// pointers to a type array, whose klass doesn't need to be recorded.
|
||||
return archived_obj;
|
||||
}
|
||||
|
||||
if (has_been_seen_during_subgraph_recording(orig_obj)) {
|
||||
// orig_obj has already been archived and traced. Nothing more to do.
|
||||
return archived_obj;
|
||||
return true;
|
||||
} else {
|
||||
set_has_been_seen_during_subgraph_recording(orig_obj);
|
||||
}
|
||||
|
||||
bool record_klasses_only = (archived_obj != nullptr);
|
||||
if (archived_obj == nullptr) {
|
||||
bool already_archived = has_been_archived(orig_obj);
|
||||
bool record_klasses_only = already_archived;
|
||||
if (!already_archived) {
|
||||
++_num_new_archived_objs;
|
||||
archived_obj = archive_object(orig_obj);
|
||||
if (archived_obj == nullptr) {
|
||||
if (!archive_object(orig_obj)) {
|
||||
// Skip archiving the sub-graph referenced from the current entry field.
|
||||
ResourceMark rm;
|
||||
log_error(cds, heap)(
|
||||
@ -1378,7 +1230,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return nullptr;
        return false;
|
||||
} else {
|
||||
// We don't know how to handle an object that has been archived, but some of its reachable
|
||||
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
|
||||
@ -1386,34 +1238,20 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
os::_exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (java_lang_Module::is_instance(orig_obj)) {
|
||||
if (Modules::check_module_oop(orig_obj)) {
|
||||
Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
|
||||
}
|
||||
java_lang_Module::set_module_entry(archived_obj, nullptr);
|
||||
} else if (java_lang_ClassLoader::is_instance(orig_obj)) {
|
||||
// class_data will be restored explicitly at run time.
|
||||
guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
|
||||
orig_obj == SystemDictionary::java_system_loader() ||
|
||||
java_lang_ClassLoader::loader_data(orig_obj) == nullptr, "must be");
|
||||
java_lang_ClassLoader::release_set_loader_data(archived_obj, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
assert(archived_obj != nullptr, "must be");
|
||||
Klass *orig_k = orig_obj->klass();
|
||||
subgraph_info->add_subgraph_object_klass(orig_k);
|
||||
|
||||
WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
|
||||
subgraph_info, orig_obj, archived_obj);
|
||||
subgraph_info, orig_obj);
|
||||
orig_obj->oop_iterate(&walker);
|
||||
if (is_closed_archive && orig_k->is_instance_klass()) {
|
||||
check_closed_region_object(InstanceKlass::cast(orig_k));
|
||||
}
|
||||
|
||||
check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
|
||||
return archived_obj;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
@ -1472,17 +1310,17 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
|
||||
f->print_on(&out);
|
||||
}
|
||||
|
||||
oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
|
||||
bool success = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
|
||||
|
||||
if (af == nullptr) {
|
||||
if (!success) {
|
||||
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
|
||||
klass_name, field_name);
|
||||
} else {
|
||||
// Note: the field value is not preserved in the archived mirror.
|
||||
// Record the field as a new subGraph entry point. The recorded
|
||||
// information is restored from the archive at runtime.
|
||||
subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
|
||||
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
|
||||
subgraph_info->add_subgraph_entry_field(field_offset, f, is_closed_archive);
|
||||
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
|
||||
}
|
||||
} else {
|
||||
// The field contains null, we still need to record the entry point,
|
||||
@ -1493,12 +1331,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
|
||||
|
||||
#ifndef PRODUCT
|
||||
class VerifySharedOopClosure: public BasicOopIterateClosure {
|
||||
private:
|
||||
bool _is_archived;
|
||||
|
||||
public:
|
||||
VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
|
||||
|
||||
void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
|
||||
void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); }
|
||||
|
||||
@ -1506,7 +1339,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
oop obj = RawAccess<>::oop_load(p);
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
HeapShared::verify_reachable_objects_from(obj, _is_archived);
|
||||
HeapShared::verify_reachable_objects_from(obj);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1523,8 +1356,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
|
||||
}
|
||||
|
||||
void HeapShared::verify_subgraph_from(oop orig_obj) {
|
||||
oop archived_obj = find_archived_heap_object(orig_obj);
|
||||
if (archived_obj == nullptr) {
|
||||
if (!has_been_archived(orig_obj)) {
|
||||
// It's OK for the root of a subgraph to be not archived. See comments in
|
||||
// archive_reachable_objects_from().
|
||||
return;
|
||||
@ -1532,32 +1364,16 @@ void HeapShared::verify_subgraph_from(oop orig_obj) {
|
||||
|
||||
// Verify that all objects reachable from orig_obj are archived.
|
||||
init_seen_objects_table();
|
||||
verify_reachable_objects_from(orig_obj, false);
|
||||
verify_reachable_objects_from(orig_obj);
|
||||
delete_seen_objects_table();
|
||||
|
||||
// Note: we could also verify that all objects reachable from the archived
|
||||
// copy of orig_obj can only point to archived objects, with:
|
||||
// init_seen_objects_table();
|
||||
// verify_reachable_objects_from(archived_obj, true);
|
||||
// init_seen_objects_table();
|
||||
// but that's already done in G1HeapVerifier::verify_archive_regions so we
|
||||
// won't do it here.
|
||||
}
|
||||
|
||||
void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
|
||||
void HeapShared::verify_reachable_objects_from(oop obj) {
|
||||
_num_total_verifications ++;
|
||||
if (!has_been_seen_during_subgraph_recording(obj)) {
|
||||
set_has_been_seen_during_subgraph_recording(obj);
|
||||
|
||||
if (is_archived) {
|
||||
assert(is_archived_object_during_dumptime(obj), "must be");
|
||||
assert(find_archived_heap_object(obj) == nullptr, "must be");
|
||||
} else {
|
||||
assert(!is_archived_object_during_dumptime(obj), "must be");
|
||||
assert(find_archived_heap_object(obj) != nullptr, "must be");
|
||||
}
|
||||
|
||||
VerifySharedOopClosure walker(is_archived);
|
||||
assert(has_been_archived(obj), "must be");
|
||||
VerifySharedOopClosure walker;
|
||||
obj->oop_iterate(&walker);
|
||||
}
|
||||
}
|
||||
@ -1811,7 +1627,6 @@ void HeapShared::init_for_dumping(TRAPS) {
|
||||
if (HeapShared::can_write()) {
|
||||
setup_test_class(ArchiveHeapTestClass);
|
||||
_dumped_interned_strings = new (mtClass)DumpedInternedStrings();
|
||||
_native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
|
||||
init_subgraph_entry_fields(CHECK);
|
||||
}
|
||||
}
|
||||
@ -1877,10 +1692,12 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
|
||||
// [2] included in the SharedArchiveConfigFile.
|
||||
void HeapShared::add_to_dumped_interned_strings(oop string) {
|
||||
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
|
||||
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
|
||||
bool created;
|
||||
_dumped_interned_strings->put_if_absent(string, true, &created);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// At dump-time, find the location of all the non-null oop pointers in an archived heap
|
||||
// region. This way we can quickly relocate all the pointers without using
|
||||
// BasicOopIterateClosure at runtime.
|
||||
@ -1912,10 +1729,6 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
|
||||
if ((*p) != nullptr) {
|
||||
size_t idx = p - (oop*)_start;
|
||||
_oopmap->set_bit(idx);
|
||||
if (DumpSharedSpaces) {
|
||||
// Make heap content deterministic.
|
||||
*p = HeapShared::to_requested_address(*p);
|
||||
}
|
||||
} else {
|
||||
_num_null_oops ++;
|
||||
}
|
||||
@ -1923,7 +1736,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
|
||||
int num_total_oops() const { return _num_total_oops; }
|
||||
int num_null_oops() const { return _num_null_oops; }
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
address HeapShared::to_requested_address(address dumptime_addr) {
|
||||
assert(DumpSharedSpaces, "static dump time only");
|
||||
@ -1952,6 +1765,7 @@ address HeapShared::to_requested_address(address dumptime_addr) {
|
||||
return requested_addr;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
|
||||
ResourceBitMap oopmap(num_bits);
|
||||
@ -1959,16 +1773,12 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
HeapWord* p = region.start();
|
||||
HeapWord* end = region.end();
|
||||
FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
|
||||
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : nullptr;
|
||||
|
||||
int num_objs = 0;
|
||||
while (p < end) {
|
||||
oop o = cast_to_oop(p);
|
||||
o->oop_iterate(&finder);
|
||||
p += o->size();
|
||||
if (DumpSharedSpaces) {
|
||||
builder->relocate_klass_ptr_of_oop(o);
|
||||
}
|
||||
++ num_objs;
|
||||
}
|
||||
|
||||
@ -1977,34 +1787,7 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
return oopmap;
|
||||
}
|
||||
|
||||
|
||||
ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
  size_t num_bits = region.byte_size() / sizeof(Metadata*);
  ResourceBitMap oopmap(num_bits);

  Metadata** start = (Metadata**)region.start();
  Metadata** end = (Metadata**)region.end();

  int num_non_null_ptrs = 0;
  int len = _native_pointers->length();
  for (int i = 0; i < len; i++) {
    Metadata** p = _native_pointers->at(i);
    if (start <= p && p < end) {
      assert(*p != nullptr, "must be non-null");
      num_non_null_ptrs ++;
      size_t idx = p - start;
      oopmap.set_bit(idx);
    }
  }

  log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
                      SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
  if (num_non_null_ptrs > 0) {
    return oopmap;
  } else {
    return ResourceBitMap(0);
  }
}
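
calculate_ptrmap() reduces to: view the region as an array of Metadata*-sized slots and set one bit per recorded native-pointer location that falls inside it. A compilable model of the same computation, with std::vector<bool> standing in for ResourceBitMap and all names hypothetical:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Model: 'region' is an array of pointer-sized slots; 'recorded' holds the
    // addresses of slots known to contain native pointers (they may span regions).
    std::vector<bool> calculate_ptrmap(const void* const* start, size_t num_slots,
                                       const std::vector<const void* const*>& recorded) {
      std::vector<bool> ptrmap(num_slots, false);
      const void* const* end = start + num_slots;
      for (const void* const* p : recorded) {
        if (start <= p && p < end) {
          ptrmap[static_cast<size_t>(p - start)] = true;   // bit index = slot index
        }
      }
      return ptrmap;
    }

    int main() {
      const void* region[8] = {};
      region[3] = region;                                   // pretend slot 3 holds a Metadata*
      std::vector<const void* const*> recorded = { &region[3] };
      std::vector<bool> map = calculate_ptrmap(region, 8, recorded);
      std::printf("slot 3 marked: %d\n", static_cast<int>(map[3]));
      return 0;
    }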
|
||||
#endif // !PRODUCT
|
||||
|
||||
void HeapShared::count_allocation(size_t size) {
|
||||
_total_obj_count ++;
|
||||
|
@ -165,8 +165,8 @@ public:
|
||||
private:
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
static bool _disable_writing;
|
||||
static bool _copying_open_region_objects;
|
||||
static DumpedInternedStrings *_dumped_interned_strings;
|
||||
static GrowableArrayCHeap<Metadata**, mtClassShared>* _native_pointers;
|
||||
|
||||
// statistics
|
||||
constexpr static int ALLOC_STAT_SLOTS = 16;
|
||||
@ -183,11 +183,21 @@ public:
|
||||
return java_lang_String::hash_code(string);
|
||||
}
|
||||
|
||||
  struct CachedOopInfo {
    KlassSubGraphInfo* _subgraph_info;
    oop _referrer;
    oop _obj;
    CachedOopInfo() :_subgraph_info(), _referrer(), _obj() {}
  class CachedOopInfo {
    // See "TEMP notes: What are these?" in archiveHeapWriter.hpp
    oop _orig_referrer;

    // The location of this object inside ArchiveHeapWriter::_buffer
    size_t _buffer_offset;
    bool _in_open_region;
  public:
    CachedOopInfo(oop orig_referrer, bool in_open_region)
      : _orig_referrer(orig_referrer),
        _buffer_offset(0), _in_open_region(in_open_region) {}
    oop orig_referrer() const { return _orig_referrer; }
    bool in_open_region() const { return _in_open_region; }
    void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
    size_t buffer_offset() const { return _buffer_offset; }
  };
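
A plausible reading of how the new CachedOopInfo is used (a sketch under assumed container types, not the actual HeapShared code): the dump-time cache maps each source object to its info record, and the writer fills in the buffer offset once the object has been copied:

    #include <cstddef>
    #include <cstdio>
    #include <unordered_map>

    // Simplified stand-ins; 'Obj' plays the role of oop.
    using Obj = const void*;

    class CachedInfo {                 // mirrors the shape of CachedOopInfo above
      Obj    _orig_referrer;
      size_t _buffer_offset;
      bool   _in_open_region;
    public:
      CachedInfo(Obj referrer = nullptr, bool open = false)
        : _orig_referrer(referrer), _buffer_offset(0), _in_open_region(open) {}
      void   set_buffer_offset(size_t off) { _buffer_offset = off; }
      size_t buffer_offset() const         { return _buffer_offset; }
      bool   in_open_region() const        { return _in_open_region; }
    };

    int main() {
      std::unordered_map<Obj, CachedInfo> cache;       // source obj -> dump-time info
      int a = 0, b = 0;
      cache.emplace(&a, CachedInfo(nullptr, false));   // closed-region object
      cache.emplace(&b, CachedInfo(&a, true));         // open-region object, referred from a
      // Later, when the writer copies each object, it records where it landed:
      cache[&a].set_buffer_offset(0);
      cache[&b].set_buffer_offset(64);
      std::printf("b is at buffer offset %zu (open=%d)\n",
                  cache[&b].buffer_offset(), static_cast<int>(cache[&b].in_open_region()));
      return 0;
    }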
|
||||
|
||||
private:
|
||||
@ -203,13 +213,6 @@ private:
|
||||
HeapShared::oop_hash> ArchivedObjectCache;
|
||||
static ArchivedObjectCache* _archived_object_cache;
|
||||
|
||||
typedef ResourceHashtable<oop, oop,
|
||||
36137, // prime number
|
||||
AnyObj::C_HEAP,
|
||||
mtClassShared,
|
||||
HeapShared::oop_hash> OriginalObjectTable;
|
||||
static OriginalObjectTable* _original_object_table;
|
||||
|
||||
class DumpTimeKlassSubGraphInfoTable
|
||||
: public ResourceHashtable<Klass*, KlassSubGraphInfo,
|
||||
137, // prime number
|
||||
@ -237,7 +240,7 @@ private:
|
||||
static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table;
|
||||
|
||||
static void check_closed_region_object(InstanceKlass* k);
|
||||
static CachedOopInfo make_cached_oop_info(oop orig_obj);
|
||||
static CachedOopInfo make_cached_oop_info();
|
||||
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
|
||||
bool is_closed_archive,
|
||||
bool is_full_module_graph);
|
||||
@ -251,7 +254,7 @@ private:
|
||||
|
||||
static void verify_subgraph_from_static_field(
|
||||
InstanceKlass* k, int field_offset) PRODUCT_RETURN;
|
||||
static void verify_reachable_objects_from(oop obj, bool is_archived) PRODUCT_RETURN;
|
||||
static void verify_reachable_objects_from(oop obj) PRODUCT_RETURN;
|
||||
static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN;
|
||||
static void check_default_subgraph_classes();
|
||||
|
||||
@ -316,7 +319,7 @@ private:
|
||||
|
||||
static bool has_been_seen_during_subgraph_recording(oop obj);
|
||||
static void set_has_been_seen_during_subgraph_recording(oop obj);
|
||||
static oop archive_object(oop obj);
|
||||
static bool archive_object(oop obj);
|
||||
|
||||
static void copy_interned_strings();
|
||||
static void copy_roots();
|
||||
@ -338,58 +341,36 @@ private:
|
||||
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info,
|
||||
int num_loaded_regions);
|
||||
static void fill_failed_loaded_region();
|
||||
static void mark_native_pointers(oop orig_obj, oop archived_obj);
|
||||
static void mark_one_native_pointer(oop archived_obj, int offset);
|
||||
static void mark_native_pointers(oop orig_obj);
|
||||
static bool has_been_archived(oop orig_obj);
|
||||
static void archive_java_mirrors();
|
||||
public:
|
||||
static void reset_archived_object_states(TRAPS);
|
||||
static void create_archived_object_cache(bool create_orig_table) {
|
||||
static void create_archived_object_cache() {
|
||||
_archived_object_cache =
|
||||
new (mtClass)ArchivedObjectCache();
|
||||
if (create_orig_table) {
|
||||
_original_object_table =
|
||||
new (mtClass)OriginalObjectTable();
|
||||
} else {
|
||||
_original_object_table = nullptr;
|
||||
}
|
||||
}
|
||||
static void destroy_archived_object_cache() {
|
||||
delete _archived_object_cache;
|
||||
_archived_object_cache = nullptr;
|
||||
if (_original_object_table != nullptr) {
|
||||
delete _original_object_table;
|
||||
_original_object_table = nullptr;
|
||||
}
|
||||
}
|
||||
static ArchivedObjectCache* archived_object_cache() {
|
||||
return _archived_object_cache;
|
||||
}
|
||||
static oop get_original_object(oop archived_object) {
|
||||
assert(_original_object_table != nullptr, "sanity");
|
||||
oop* r = _original_object_table->get(archived_object);
|
||||
if (r == nullptr) {
|
||||
return nullptr;
|
||||
} else {
|
||||
return *r;
|
||||
}
|
||||
}
|
||||
|
||||
static bool is_too_large_to_archive(oop o);
|
||||
static oop find_archived_heap_object(oop obj);
|
||||
|
||||
static void archive_java_mirrors();
|
||||
|
||||
static void archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
GrowableArray<MemRegion>* open_regions);
|
||||
static void copy_closed_objects(GrowableArray<MemRegion>* closed_regions);
|
||||
static void copy_open_objects(GrowableArray<MemRegion>* open_regions);
|
||||
GrowableArray<MemRegion>* open_regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
|
||||
static void copy_closed_objects();
|
||||
static void copy_open_objects();
|
||||
|
||||
static oop archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive);
|
||||
static bool archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive);
|
||||
|
||||
static ResourceBitMap calculate_oopmap(MemRegion region); // marks all the oop pointers
|
||||
static ResourceBitMap calculate_ptrmap(MemRegion region); // marks all the native pointers
|
||||
static void add_to_dumped_interned_strings(oop string);
|
||||
|
||||
// Scratch objects for archiving Klass::java_mirror()
|
||||
@ -426,16 +407,12 @@ private:
|
||||
|
||||
public:
|
||||
static void init_scratch_objects(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
static bool is_heap_region(int idx) {
|
||||
CDS_JAVA_HEAP_ONLY(return (idx >= MetaspaceShared::first_closed_heap_region &&
|
||||
idx <= MetaspaceShared::last_open_heap_region);)
|
||||
NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
}
|
||||
|
||||
static bool is_archived_object_during_dumptime(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
|
||||
static void resolve_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static void initialize_from_archived_subgraph(JavaThread* current, Klass* k) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveHeapLoader.hpp"
|
||||
#include "cds/archiveHeapWriter.hpp"
|
||||
#include "cds/cds_globals.hpp"
|
||||
#include "cds/cdsProtectionDomain.hpp"
|
||||
#include "cds/classListWriter.hpp"
|
||||
@ -82,9 +83,6 @@
|
||||
#include "utilities/ostream.hpp"
|
||||
#include "utilities/defaultStream.hpp"
|
||||
#include "utilities/resourceHash.hpp"
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#endif
|
||||
|
||||
ReservedSpace MetaspaceShared::_symbol_rs;
|
||||
VirtualSpace MetaspaceShared::_symbol_vs;
|
||||
@ -331,22 +329,16 @@ void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename)
|
||||
reader.last_line_no(), utf8_length);
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
} else {
|
||||
#if INCLUDE_G1GC
|
||||
if (UseG1GC) {
|
||||
typeArrayOop body = java_lang_String::value(str);
|
||||
const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
|
||||
if (hr->is_humongous()) {
|
||||
// Don't keep it alive, so it will be GC'ed before we dump the strings, in order
|
||||
// to maximize free heap space and minimize fragmentation.
|
||||
log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
|
||||
reader.last_line_no(), utf8_length);
|
||||
continue;
|
||||
}
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
if (ArchiveHeapWriter::is_string_too_large_to_archive(str)) {
|
||||
log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
|
||||
reader.last_line_no(), utf8_length);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
// Make sure this string is included in the dumped interned string table.
|
||||
assert(str != nullptr, "must succeed");
|
||||
_extra_interned_strings->append(OopHandle(Universe::vm_global(), str));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -436,7 +428,7 @@ void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread
|
||||
}
|
||||
}
|
||||
|
||||
class VM_PopulateDumpSharedSpace : public VM_GC_Operation {
|
||||
class VM_PopulateDumpSharedSpace : public VM_Operation {
|
||||
private:
|
||||
GrowableArray<MemRegion> *_closed_heap_regions;
|
||||
GrowableArray<MemRegion> *_open_heap_regions;
|
||||
@ -445,11 +437,6 @@ private:
|
||||
GrowableArray<ArchiveHeapBitmapInfo> *_open_heap_bitmaps;
|
||||
|
||||
void dump_java_heap_objects(GrowableArray<Klass*>* klasses) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void dump_heap_bitmaps() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void dump_heap_bitmaps(GrowableArray<MemRegion>* regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps);
|
||||
void dump_one_heap_bitmap(MemRegion region, GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
|
||||
ResourceBitMap bitmap, bool is_oopmap);
|
||||
void dump_shared_symbol_table(GrowableArray<Symbol*>* symbols) {
|
||||
log_info(cds)("Dumping symbol table ...");
|
||||
SymbolTable::write_to_archive(symbols);
|
||||
@ -458,8 +445,7 @@ private:
|
||||
|
||||
public:
|
||||
|
||||
VM_PopulateDumpSharedSpace() :
|
||||
VM_GC_Operation(0 /* total collections, ignored */, GCCause::_archive_time_gc),
|
||||
VM_PopulateDumpSharedSpace() : VM_Operation(),
|
||||
_closed_heap_regions(nullptr),
|
||||
_open_heap_regions(nullptr),
|
||||
_closed_heap_bitmaps(nullptr),
|
||||
@ -508,15 +494,10 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
|
||||
WriteClosure wc(ro_region);
|
||||
MetaspaceShared::serialize(&wc);
|
||||
|
||||
// Write the bitmaps for patching the archive heap regions
|
||||
dump_heap_bitmaps();
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::doit() {
|
||||
HeapShared::run_full_gc_in_vm_thread();
|
||||
|
||||
DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
|
||||
|
||||
FileMapInfo::check_nonempty_dir_in_shared_path_table();
|
||||
@ -820,9 +801,10 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
|
||||
log_info(cds)("Rewriting and linking classes: done");
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
if (use_full_module_graph()) {
|
||||
HeapShared::reset_archived_object_states(CHECK);
|
||||
}
|
||||
ArchiveHeapWriter::init();
|
||||
if (use_full_module_graph()) {
|
||||
HeapShared::reset_archived_object_states(CHECK);
|
||||
}
|
||||
#endif
|
||||
|
||||
VM_PopulateDumpSharedSpace op;
|
||||
@ -895,60 +877,13 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
|
||||
// See FileMapInfo::write_heap_regions() for details.
|
||||
_closed_heap_regions = new GrowableArray<MemRegion>(2);
|
||||
_open_heap_regions = new GrowableArray<MemRegion>(2);
|
||||
HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions);
|
||||
_closed_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
_open_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions,
|
||||
_closed_heap_bitmaps, _open_heap_bitmaps);
|
||||
ArchiveBuilder::OtherROAllocMark mark;
|
||||
HeapShared::write_subgraph_info_table();
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_heap_bitmaps() {
|
||||
if (HeapShared::can_write()) {
|
||||
_closed_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
dump_heap_bitmaps(_closed_heap_regions, _closed_heap_bitmaps);
|
||||
|
||||
_open_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
dump_heap_bitmaps(_open_heap_regions, _open_heap_bitmaps);
|
||||
}
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_heap_bitmaps(GrowableArray<MemRegion>* regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps) {
|
||||
for (int i = 0; i < regions->length(); i++) {
|
||||
MemRegion region = regions->at(i);
|
||||
ResourceBitMap oopmap = HeapShared::calculate_oopmap(region);
|
||||
ResourceBitMap ptrmap = HeapShared::calculate_ptrmap(region);
|
||||
dump_one_heap_bitmap(region, bitmaps, oopmap, true);
|
||||
dump_one_heap_bitmap(region, bitmaps, ptrmap, false);
|
||||
}
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_one_heap_bitmap(MemRegion region,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
|
||||
ResourceBitMap bitmap, bool is_oopmap) {
|
||||
size_t size_in_bits = bitmap.size();
|
||||
size_t size_in_bytes;
|
||||
uintptr_t* buffer;
|
||||
|
||||
if (size_in_bits > 0) {
|
||||
size_in_bytes = bitmap.size_in_bytes();
|
||||
buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
|
||||
bitmap.write_to(buffer, size_in_bytes);
|
||||
} else {
|
||||
size_in_bytes = 0;
|
||||
buffer = nullptr;
|
||||
}
|
||||
|
||||
log_info(cds, heap)("%s = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
|
||||
INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
|
||||
is_oopmap ? "Oopmap" : "Ptrmap",
|
||||
p2i(buffer), size_in_bytes,
|
||||
p2i(region.start()), region.byte_size());
|
||||
|
||||
ArchiveHeapBitmapInfo info;
|
||||
info._map = (address)buffer;
|
||||
info._size_in_bits = size_in_bits;
|
||||
info._size_in_bytes = size_in_bytes;
|
||||
bitmaps->append(info);
|
||||
}
|
||||
#endif // INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveHeapLoader.inline.hpp"
|
||||
#include "cds/archiveHeapWriter.hpp"
|
||||
#include "cds/filemap.hpp"
|
||||
#include "cds/heapShared.hpp"
|
||||
#include "classfile/altHashing.hpp"
|
||||
@ -770,14 +771,14 @@ public:
|
||||
  EncodeSharedStringsAsOffsets(CompactHashtableWriter* writer) : _writer(writer) {}
  bool do_entry(oop s, bool value_ignored) {
    assert(s != nullptr, "sanity");
    oop new_s = HeapShared::find_archived_heap_object(s);
    if (new_s != nullptr) { // could be null if the string is too big
      unsigned int hash = java_lang_String::hash_code(s);
      if (UseCompressedOops) {
        _writer->add(hash, CompressedOops::narrow_oop_value(new_s));
      } else {
        _writer->add(hash, compute_delta(new_s));
      }
    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "must be");
    oop req_s = ArchiveHeapWriter::source_obj_to_requested_obj(s);
    assert(req_s != nullptr, "must have been archived");
    unsigned int hash = java_lang_String::hash_code(s);
    if (UseCompressedOops) {
      _writer->add(hash, CompressedOops::narrow_oop_value(req_s));
    } else {
      _writer->add(hash, compute_delta(req_s));
    }
    return true; // keep iterating
  }
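
The rewritten closure stores, per interned string, its hash together with either the narrow-oop value of the requested address (with compressed oops) or a delta from a base address (without). A small model of that two-mode encoding; the writer type and the shift amount are assumptions for illustration, not the real CompactHashtableWriter:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Entry { unsigned hash; uint32_t value; };

    // Model of CompactHashtableWriter::add(): just collect (hash, encoded) pairs.
    struct Writer {
      std::vector<Entry> entries;
      void add(unsigned hash, uint32_t encoded) { entries.push_back({hash, encoded}); }
    };

    // Encode a "requested" address either as a shifted narrow value or as a
    // byte delta from a base, mimicking the UseCompressedOops split above.
    uint32_t encode(uintptr_t requested, uintptr_t heap_base, bool compressed_oops) {
      if (compressed_oops) {
        return static_cast<uint32_t>((requested - heap_base) >> 3);  // narrow-oop style (assumed shift 3)
      } else {
        return static_cast<uint32_t>(requested - heap_base);         // plain byte delta
      }
    }

    int main() {
      Writer w;
      uintptr_t base = 0x10000000u;
      w.add(0x1234u, encode(base + 0x40, base, true));
      w.add(0x5678u, encode(base + 0x80, base, false));
      std::printf("%u %u\n", (unsigned)w.entries[0].value, (unsigned)w.entries[1].value);
      return 0;
    }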
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -484,153 +484,3 @@ size_t G1PLABAllocator::undo_waste() const {
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
|
||||
return new G1ArchiveAllocator(g1h, open);
|
||||
}
|
||||
|
||||
bool G1ArchiveAllocator::alloc_new_region() {
|
||||
// Allocate the highest free region in the reserved heap,
|
||||
// and add it to our list of allocated regions. It is marked
|
||||
// archive and added to the old set.
|
||||
HeapRegion* hr = _g1h->alloc_highest_free_region();
|
||||
if (hr == NULL) {
|
||||
return false;
|
||||
}
|
||||
assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
|
||||
if (_open) {
|
||||
hr->set_open_archive();
|
||||
} else {
|
||||
hr->set_closed_archive();
|
||||
}
|
||||
_g1h->policy()->remset_tracker()->update_at_allocate(hr);
|
||||
_g1h->archive_set_add(hr);
|
||||
_g1h->hr_printer()->alloc(hr);
|
||||
_allocated_regions.append(hr);
|
||||
_allocation_region = hr;
|
||||
|
||||
// Set up _bottom and _max to begin allocating in the lowest
|
||||
// min_region_size'd chunk of the allocated G1 region.
|
||||
_bottom = hr->bottom();
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
|
||||
// Since we've modified the old set, call update_sizes.
|
||||
_g1h->monitoring_support()->update_sizes();
|
||||
return true;
|
||||
}
|
||||
|
||||
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
|
||||
assert(word_size != 0, "size must not be zero");
|
||||
if (_allocation_region == NULL) {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
HeapWord* old_top = _allocation_region->top();
|
||||
assert(_bottom >= _allocation_region->bottom(),
|
||||
"inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(_allocation_region->bottom()));
|
||||
assert(_max <= _allocation_region->end(),
|
||||
"inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
|
||||
p2i(_max), p2i(_allocation_region->end()));
|
||||
assert(_bottom <= old_top && old_top <= _max,
|
||||
"inconsistent allocation state: expected "
|
||||
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(old_top), p2i(_max));
|
||||
|
||||
  // Try to allocate word_size in the current allocation chunk. Two cases
  // require special treatment:
  // 1. not enough space for word_size
  // 2. after allocating word_size, there's non-zero space left, but too small for the minimal filler
  // In both cases, we retire the current chunk and move on to the next one.
  size_t free_words = pointer_delta(_max, old_top);
  if (free_words < word_size ||
      ((free_words - word_size != 0) && (free_words - word_size < CollectedHeap::min_fill_size()))) {
|
||||
// Retiring the current chunk
|
||||
if (old_top != _max) {
|
||||
// Non-zero space; need to insert the filler
|
||||
size_t fill_size = free_words;
|
||||
CollectedHeap::fill_with_object(old_top, fill_size);
|
||||
}
|
||||
// Set the current chunk as "full"
|
||||
_allocation_region->set_top(_max);
|
||||
|
||||
// Check if we've just used up the last min_region_size'd chunk
|
||||
// in the current region, and if so, allocate a new one.
|
||||
if (_max != _allocation_region->end()) {
|
||||
// Shift to the next chunk
|
||||
old_top = _bottom = _max;
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
} else {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
old_top = _allocation_region->bottom();
|
||||
}
|
||||
}
|
||||
assert(pointer_delta(_max, old_top) >= word_size, "enough space left");
|
||||
_allocation_region->set_top(old_top + word_size);
|
||||
|
||||
return old_top;
|
||||
}
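
The retire-or-advance decision in the removed archive_mem_allocate() above comes down to one predicate: retire the current chunk when the request does not fit, or when the leftover tail would be non-zero but smaller than the minimum filler object. A worked standalone version of just that predicate (the minimum filler size is a made-up constant here):

    #include <cstddef>
    #include <cstdio>

    // Retire the current chunk if the request does not fit, or if the space left
    // after the allocation would be non-zero yet too small for a filler object.
    bool must_retire_chunk(size_t free_words, size_t word_size, size_t min_fill_size) {
      if (free_words < word_size) return true;
      size_t leftover = free_words - word_size;
      return leftover != 0 && leftover < min_fill_size;
    }

    int main() {
      const size_t min_fill = 2;                                        // hypothetical minimum filler size
      std::printf("%d\n", (int)must_retire_chunk(10, 12, min_fill));    // 1: does not fit
      std::printf("%d\n", (int)must_retire_chunk(10, 9,  min_fill));    // 1: leftover 1 < min filler
      std::printf("%d\n", (int)must_retire_chunk(10, 8,  min_fill));    // 0: leftover 2 is fillable
      std::printf("%d\n", (int)must_retire_chunk(10, 10, min_fill));    // 0: exact fit, no leftover
      return 0;
    }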
|
||||
|
||||
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
|
||||
"alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
|
||||
assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
|
||||
"alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
|
||||
|
||||
// If we've allocated nothing, simply return.
|
||||
if (_allocation_region == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If an end alignment was requested, insert filler objects.
|
||||
if (end_alignment_in_bytes != 0) {
|
||||
HeapWord* currtop = _allocation_region->top();
|
||||
HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
|
||||
size_t fill_size = pointer_delta(newtop, currtop);
|
||||
if (fill_size != 0) {
|
||||
if (fill_size < CollectedHeap::min_fill_size()) {
|
||||
// If the required fill is smaller than we can represent,
|
||||
// bump up to the next aligned address. We know we won't exceed the current
|
||||
// region boundary because the max supported alignment is smaller than the min
|
||||
// region size, and because the allocation code never leaves space smaller than
|
||||
// the min_fill_size at the top of the current allocation region.
|
||||
newtop = align_up(currtop + CollectedHeap::min_fill_size(),
|
||||
end_alignment_in_bytes);
|
||||
fill_size = pointer_delta(newtop, currtop);
|
||||
}
|
||||
HeapWord* fill = archive_mem_allocate(fill_size);
|
||||
CollectedHeap::fill_with_objects(fill, fill_size);
|
||||
}
|
||||
}
|
||||
|
||||
// Loop through the allocated regions, and create MemRegions summarizing
|
||||
// the allocated address range, combining contiguous ranges. Add the
|
||||
// MemRegions to the GrowableArray provided by the caller.
|
||||
int index = _allocated_regions.length() - 1;
|
||||
assert(_allocated_regions.at(index) == _allocation_region,
|
||||
"expected region %u at end of array, found %u",
|
||||
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
|
||||
HeapWord* base_address = _allocation_region->bottom();
|
||||
HeapWord* top = base_address;
|
||||
|
||||
while (index >= 0) {
|
||||
HeapRegion* next = _allocated_regions.at(index);
|
||||
HeapWord* new_base = next->bottom();
|
||||
HeapWord* new_top = next->top();
|
||||
if (new_base != top) {
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
base_address = new_base;
|
||||
}
|
||||
top = new_top;
|
||||
index = index - 1;
|
||||
}
|
||||
|
||||
assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
_allocated_regions.clear();
|
||||
_allocation_region = NULL;
|
||||
};
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -225,60 +225,4 @@ public:
|
||||
void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
|
||||
};
|
||||
|
||||
// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not scavenged nor compacted by GC.
// There are two types of archive regions, which
// differ in the kind of references allowed for the contained objects:
//
// - A 'closed' archive region contains no references outside of other
//   closed archive regions. The region is immutable to GC. GC does
//   not mark object headers in 'closed' archive regions.
// - An 'open' archive region allows references to any other regions,
//   including closed archive, open archive and other Java heap regions.
//   GC can adjust pointers and mark object headers in 'open' archive regions.
|
||||
class G1ArchiveAllocator : public CHeapObj<mtGC> {
|
||||
protected:
|
||||
bool _open; // Indicate if the region is 'open' archive.
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The current allocation region
|
||||
HeapRegion* _allocation_region;
|
||||
|
||||
// Regions allocated for the current archive range.
|
||||
GrowableArrayCHeap<HeapRegion*, mtGC> _allocated_regions;
|
||||
|
||||
// Current allocation window within the current region.
|
||||
HeapWord* _bottom;
|
||||
HeapWord* _top;
|
||||
HeapWord* _max;
|
||||
|
||||
// Allocate a new region for this archive allocator.
|
||||
// Allocation is from the top of the reserved heap downward.
|
||||
bool alloc_new_region();
|
||||
|
||||
public:
|
||||
G1ArchiveAllocator(G1CollectedHeap* g1h, bool open) :
|
||||
_open(open),
|
||||
_g1h(g1h),
|
||||
_allocation_region(NULL),
|
||||
_allocated_regions(2),
|
||||
_bottom(NULL),
|
||||
_top(NULL),
|
||||
_max(NULL) { }
|
||||
|
||||
virtual ~G1ArchiveAllocator() {
|
||||
assert(_allocation_region == NULL, "_allocation_region not NULL");
|
||||
}
|
||||
|
||||
static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h, bool open);
|
||||
|
||||
// Allocate memory for an individual object.
|
||||
HeapWord* archive_mem_allocate(size_t word_size);
|
||||
|
||||
// Return the memory ranges used in the current archive, after
|
||||
// aligning to the requested alignment.
|
||||
void complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1ALLOCATOR_HPP
|
||||
|
@ -489,40 +489,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::begin_archive_alloc_range(bool open) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator == nullptr, "should not be initialized");
|
||||
_archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
|
||||
// Allocations in archive regions cannot be of a size that would be considered
|
||||
// humongous even for a minimum-sized region, because G1 region sizes/boundaries
|
||||
// may be different at archive-restore time.
|
||||
return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
|
||||
}
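
The removed is_archive_alloc_too_large() rejects any allocation that would be humongous even in the smallest G1 region. Assuming the usual 1 MB minimum region size and a humongous threshold of half a region, that caps a single archived object at just under 512 KB; a quick arithmetic check with those assumed constants:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t heap_word_size      = 8;                       // 64-bit HeapWord (assumed)
      const size_t min_region_bytes    = 1024 * 1024;             // assumed G1 minimum region size
      const size_t min_region_words    = min_region_bytes / heap_word_size;
      const size_t humongous_threshold = min_region_words / 2;    // half a region, as in the check above
      // Per the removed check, word_size >= threshold is rejected, so the cap is threshold - 1 words.
      std::printf("largest archive allocation: %zu words (%zu KB)\n",
                  humongous_threshold - 1,
                  (humongous_threshold - 1) * heap_word_size / 1024);
      return 0;
    }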
|
||||
|
||||
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator != nullptr, "_archive_allocator not initialized");
|
||||
if (is_archive_alloc_too_large(word_size)) {
|
||||
return nullptr;
|
||||
}
|
||||
return _archive_allocator->archive_mem_allocate(word_size);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator != nullptr, "_archive_allocator not initialized");
|
||||
|
||||
// Call complete_archive to do the real work, filling in the MemRegion
|
||||
// array with the archive regions.
|
||||
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
|
||||
delete _archive_allocator;
|
||||
_archive_allocator = nullptr;
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
@ -1416,7 +1382,6 @@ G1CollectedHeap::G1CollectedHeap() :
|
||||
_verifier(NULL),
|
||||
_summary_bytes_used(0),
|
||||
_bytes_used_during_gc(0),
|
||||
_archive_allocator(nullptr),
|
||||
_survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
|
||||
_old_evac_stats("Old", OldPLABSize, PLABWeight),
|
||||
_monitoring_support(nullptr),
|
||||
@ -1803,7 +1768,6 @@ size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
|
||||
// Computes the sum of the storage used by the various regions.
|
||||
size_t G1CollectedHeap::used() const {
|
||||
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -3045,7 +3009,6 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
|
||||
|
||||
if (!free_list_only) {
|
||||
set_used(cl.total_used());
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
}
|
||||
assert_used_and_recalculate_used_equal(this);
|
||||
}
|
||||
@ -3244,8 +3207,6 @@ void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
|
||||
evac_failure_injector()->reset();
|
||||
|
||||
set_used(recalculate_used());
|
||||
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
} else {
|
||||
// The "used" of the collection set have already been subtracted
|
||||
// when they were freed. Add in the bytes used.
|
||||
|
@ -67,7 +67,6 @@

// Forward declarations
class G1Allocator;
class G1ArchiveAllocator;
class G1BatchedTask;
class G1CardTableEntryClosure;
class G1ConcurrentMark;

@ -244,9 +243,6 @@ public:
  size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }

private:
  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

@ -700,26 +696,6 @@ public:
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list);

  // Facility for allocating in 'archive' regions in high heap memory and
  // recording the allocated ranges. These should all be called from the
  // VM thread at safepoints, without the heap lock held. They can be used
  // to create and archive a set of heap regions which can be mapped at the
  // same fixed addresses in a subsequent JVM invocation.
  void begin_archive_alloc_range(bool open = false);

  // Check if the requested size would be too large for an archive allocation.
  bool is_archive_alloc_too_large(size_t word_size);

  // Allocate memory of the requested size from the archive region. This will
  // return NULL if the size is too large or if no memory is available. It
  // does not trigger a garbage collection.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Optionally aligns the end address and returns the allocated ranges in
  // an array of MemRegions in order of ascending addresses.
  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                               size_t end_alignment_in_bytes = 0);

  // Facility for allocating a fixed range within the heap and marking
  // the containing regions as 'archive'. For use at JVM init time, when the
  // caller may mmap archived heap data at the specified range(s).
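For illustration only, a minimal sketch of how a dump-time caller could have driven the facility being removed above, assuming it runs at a safepoint on the VM thread. The four G1CollectedHeap methods are the ones listed in this hunk; the function name, the ranges array, and the word size are placeholders, not code from this change.

// Minimal sketch, not actual HotSpot code.
void example_allocate_archive_object(G1CollectedHeap* g1h,
                                     GrowableArray<MemRegion>* ranges,
                                     size_t word_size) {
  g1h->begin_archive_alloc_range(/*open=*/false);
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* p = g1h->archive_mem_allocate(word_size);
    if (p != nullptr) {
      // ... copy the object to be archived into [p, p + word_size) ...
    }
  }
  // Hand back the allocated regions as MemRegions in ascending address order.
  g1h->end_archive_alloc_range(ranges, /*end_alignment_in_bytes=*/0);
}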
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -310,28 +310,6 @@ public:
  }
};

// We want all used regions to be moved to the bottom-end of the heap, so we have
// a contiguous range of free regions at the top end of the heap. This way, we can
// avoid fragmentation while allocating the archive regions.
//
// Before calling this, a full GC should have been executed with a single worker thread,
// so that no old regions would be moved to the middle of the heap.
void G1HeapVerifier::verify_ready_for_archiving() {
  VerifyReadyForArchivingRegionClosure cl;
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  if (cl.has_holes()) {
    log_warning(gc, verify)("All free regions should be at the top end of the heap, but"
                            " we found holes. This is probably caused by (unmovable) humongous"
                            " allocations or active GCLocker, and may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
  if (cl.has_humongous()) {
    log_warning(gc, verify)("(Unmovable) humongous regions have been found and"
                            " may lead to fragmentation while"
                            " writing archive heap memory regions.");
  }
}
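A simplified, hypothetical illustration of the "holes" check warned about above (not the actual VerifyReadyForArchivingRegionClosure): walking regions from the bottom of the heap, any used region encountered after a free one means the free space is not contiguous at the top.

// Hypothetical sketch of hole detection over a bottom-to-top region walk.
class ExampleHoleDetector {
  bool _seen_free = false;
  bool _has_holes = false;
public:
  void visit_region(bool region_is_free) {
    if (region_is_free) {
      _seen_free = true;        // free space should only appear at the top
    } else if (_seen_free) {
      _has_holes = true;        // a used region above a free one is a hole
    }
  }
  bool has_holes() const { return _has_holes; }
};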

class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    if (r->is_archive()) {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -81,7 +81,6 @@ public:
  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_young_regions() PRODUCT_RETURN;

  static void verify_ready_for_archiving();
  static void verify_archive_regions();
};
@ -298,7 +298,6 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
      do_full_collection(false); // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true); // do clear all soft refs

@ -60,9 +60,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
    case _wb_breakpoint:
      return "WhiteBox Initiated Run to Breakpoint";

    case _archive_time_gc:
      return "Full GC for -Xshare:dump";

    case _no_gc:
      return "No GC";

@ -53,7 +53,6 @@ class GCCause : public AllStatic {
    _wb_young_gc,
    _wb_full_gc,
    _wb_breakpoint,
    _archive_time_gc,

    /* implementation independent, but reserved for GC use */
    _no_gc,
@ -23,6 +23,7 @@
 */

#include "precompiled.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/classPrelinker.hpp"

@ -294,8 +295,7 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() {
      int index = object_to_cp_index(i);
      if (tag_at(index).is_string()) {
        assert(java_lang_String::is_instance(obj), "must be");
        typeArrayOop value = java_lang_String::value_no_keepalive(obj);
        if (!HeapShared::is_too_large_to_archive(value)) {
        if (!ArchiveHeapWriter::is_string_too_large_to_archive(obj)) {
          rr->obj_at_put(i, obj);
        }
      }

@ -311,7 +311,8 @@ void ConstantPool::add_dumped_interned_strings() {
  int rr_len = rr->length();
  for (int i = 0; i < rr_len; i++) {
    oop p = rr->obj_at(i);
    if (java_lang_String::is_instance(p)) {
    if (java_lang_String::is_instance(p) &&
        !ArchiveHeapWriter::is_string_too_large_to_archive(p)) {
      HeapShared::add_to_dumped_interned_strings(p);
    }
  }
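A hypothetical sketch of what a "too large to archive" test for a String might look like; the real ArchiveHeapWriter check is not shown in this diff, so the size cap and helper name below are invented. Only java_lang_String::value_no_keepalive and typeArrayOop are taken from the hunk above.

// Invented illustration, not the actual ArchiveHeapWriter implementation.
const size_t EXAMPLE_MAX_ARCHIVED_STRING_BYTES = 4 * 1024 * 1024; // made-up cap

static bool example_is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return (size_t)value->size() * HeapWordSize > EXAMPLE_MAX_ARCHIVED_STRING_BYTES;
}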
@ -35,6 +35,7 @@ class Klass;
// Evaluating "String arg[10]" will create an objArrayOop.

class objArrayOopDesc : public arrayOopDesc {
  friend class ArchiveHeapWriter;
  friend class ObjArrayKlass;
  friend class Runtime1;
  friend class psPromotionManager;
@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@ -418,7 +418,6 @@ hotspot_appcds_dynamic = \
  -runtime/cds/appcds/javaldr/ArrayTest.java \
  -runtime/cds/appcds/javaldr/ExceptionDuringDumpAtObjectsInitPhase.java \
  -runtime/cds/appcds/javaldr/GCSharedStringsDuringDump.java \
  -runtime/cds/appcds/javaldr/HumongousDuringDump.java \
  -runtime/cds/appcds/javaldr/LockDuringDump.java \
  -runtime/cds/appcds/jcmd/JCmdTestStaticDump.java \
  -runtime/cds/appcds/jcmd/JCmdTestDynamicDump.java \
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -149,7 +149,6 @@ public class ArchivedIntegerCacheTest {
                "-Xlog:gc+region=trace",
                use_whitebox_jar);
        TestCommon.checkDump(output,
            "Cannot archive the sub-graph referenced from [Ljava.lang.Integer; object",
            "humongous regions have been found and may lead to fragmentation");
            "Cannot archive the sub-graph referenced from [Ljava.lang.Integer; object");
    }
}
@ -1,86 +0,0 @@
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * @test
 * @summary Test how CDS dumping handles the existence of humongous G1 regions.
 * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds /test/hotspot/jtreg/runtime/cds/appcds/test-classes
 * @requires vm.cds.write.archived.java.heap
 * @requires vm.jvmti
 * @run driver/timeout=240 HumongousDuringDump
 */

import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.helpers.ClassFileInstaller;

public class HumongousDuringDump {
    public static String appClasses[] = {
        Hello.class.getName(),
    };
    public static String agentClasses[] = {
        HumongousDuringDumpTransformer.class.getName(),
    };

    public static void main(String[] args) throws Throwable {
        String agentJar =
            ClassFileInstaller.writeJar("HumongousDuringDumpTransformer.jar",
                ClassFileInstaller.Manifest.fromSourceFile("HumongousDuringDumpTransformer.mf"),
                agentClasses);

        String appJar =
            ClassFileInstaller.writeJar("HumongousDuringDumpApp.jar", appClasses);

        String gcLog = Boolean.getBoolean("test.cds.verbose.gc") ?
            "-Xlog:gc*=info,gc+region=trace,gc+alloc+region=debug" : "-showversion";

        String extraArg = "-javaagent:" + agentJar;
        String extraOption = "-XX:+AllowArchivingWithJavaAgent";

        OutputAnalyzer out =
            TestCommon.testDump(appJar, TestCommon.list(Hello.class.getName()),
                "-XX:+UnlockDiagnosticVMOptions", extraOption,
                "-Xlog:gc+region+cds",
                "-Xlog:gc+region=trace",
                extraArg, "-Xmx64m", gcLog);
        out.shouldContain("(Unmovable) humongous regions have been found and may lead to fragmentation");
        out.shouldContain("All free regions should be at the top end of the heap, but we found holes.");
        out.shouldMatch("gc,region,cds. HeapRegion .* HUM. hole");
        String pattern = "gc,region,cds. HeapRegion .*hole";
        out.shouldMatch(pattern);
        out.shouldNotMatch(pattern + ".*unexpected");

        TestCommon.run(
            "-cp", appJar,
            "-verbose",
            "-Xmx64m",
            "-Xlog:cds=info",
            "-XX:+UnlockDiagnosticVMOptions", extraOption,
            gcLog,
            Hello.class.getName())
          .assertNormalExit();
    }
}
@ -1,112 +0,0 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

import java.lang.instrument.ClassFileTransformer;
import java.lang.instrument.Instrumentation;
import java.lang.instrument.IllegalClassFormatException;
import java.security.ProtectionDomain;

// This test is sensitive to -Xmx. It must be run with -xmx64m.
// Running with a different -Xmx requires changing the parameters and careful re-testing.
public class HumongousDuringDumpTransformer implements ClassFileTransformer {
    public byte[] transform(ClassLoader loader, String name, Class<?> classBeingRedefined,
                            ProtectionDomain pd, byte[] buffer) throws IllegalClassFormatException {
        if (name.equals("Hello")) {
            try {
                makeHumongousRegions();
            } catch (Throwable t) {
                array = null;
                humon = null;
                System.out.println("Unexpected error: " + t);
                t.printStackTrace();
            }
        }
        array = null;
        return null;
    }

    private static Instrumentation savedInstrumentation;

    public static void premain(String agentArguments, Instrumentation instrumentation) {
        long xmx = Runtime.getRuntime().maxMemory();
        if (xmx < 60 * 1024 * 1024 || xmx > 80 * 1024 * 1024) {
            System.out.println("Running with incorrect heap size: " + xmx);
            System.exit(1);
        }

        System.out.println("ClassFileTransformer.premain() is called");
        instrumentation.addTransformer(new HumongousDuringDumpTransformer(), /*canRetransform=*/true);
        savedInstrumentation = instrumentation;
    }

    public static Instrumentation getInstrumentation() {
        return savedInstrumentation;
    }

    public static void agentmain(String args, Instrumentation inst) throws Exception {
        premain(args, inst);
    }

    Object[] array;

    static final int DUMMY_SIZE = 4096 - 16 - 8;
    static final int HUMON_SIZE = 4 * 1024 * 1024 - 16 - 8;
    static final int SKIP = 13;

    byte humon[] = null;
    boolean first = true;

    public synchronized void makeHumongousRegions() {
        if (!first) {
            return;
        }
        System.out.println("===============================================================================");
        first = false;

        int total = 0;
        array = new Object[100000];
        System.out.println(array);

        // (1) Allocate about 8MB of old objects.
        for (int n=0, i=0; total < 8 * 1024 * 1024; n++) {
            // Make enough allocations to cause a GC (for 64MB heap) to create
            // old regions.
            //
            // But don't completely fill up the heap. That would cause OOM and
            // may not be handled gracefully inside class transformation!
            Object x = new byte[DUMMY_SIZE];
            if ((n % SKIP) == 0) {
                array[i++] = x;
                total += DUMMY_SIZE;
            }
        }

        System.gc();

        // (2) Now allocate a humongous array. It will sit above the 8MB of old regions.
        humon = new byte[HUMON_SIZE];
        array = null;
        System.gc();
    }
}
@ -1,5 +0,0 @@
Manifest-Version: 1.0
Premain-Class: HumongousDuringDumpTransformer
Agent-Class: HumongousDuringDumpTransformer
Can-Retransform-Classes: true
Can-Redefine-Classes: true