8292225: Rename ArchiveBuilder APIs related to source and buffered addresses
Reviewed-by: ccheung
This commit is contained in:
parent 155b10ae86
commit 41ce658267
@@ -109,18 +109,18 @@ void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src

 class RelocateEmbeddedPointers : public BitMapClosure {
   ArchiveBuilder* _builder;
-  address _dumped_obj;
+  address _buffered_obj;
   BitMap::idx_t _start_idx;
 public:
-  RelocateEmbeddedPointers(ArchiveBuilder* builder, address dumped_obj, BitMap::idx_t start_idx) :
-    _builder(builder), _dumped_obj(dumped_obj), _start_idx(start_idx) {}
+  RelocateEmbeddedPointers(ArchiveBuilder* builder, address buffered_obj, BitMap::idx_t start_idx) :
+    _builder(builder), _buffered_obj(buffered_obj), _start_idx(start_idx) {}

   bool do_bit(BitMap::idx_t bit_offset) {
     size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
-    address* ptr_loc = (address*)(_dumped_obj + field_offset);
+    address* ptr_loc = (address*)(_buffered_obj + field_offset);

     address old_p = *ptr_loc;
-    address new_p = _builder->get_dumped_addr(old_p);
+    address new_p = _builder->get_buffered_addr(old_p);

     log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
                    p2i(ptr_loc), p2i(old_p), p2i(new_p));

@@ -136,7 +136,7 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
   BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
   BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end());     // exclusive

-  RelocateEmbeddedPointers relocator(builder, src_info->dumped_addr(), start);
+  RelocateEmbeddedPointers relocator(builder, src_info->buffered_addr(), start);
   _ptrmap.iterate(&relocator, start, end);
 }

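The two hunks above are the heart of the relocation pass: a bitmap records which words of each archived object hold pointers, and do_bit() rewrites each marked word to point at the pointee's copy in the buffer. A simplified standalone sketch of that bitmap-driven relocation idea (illustrative names only, plain STL instead of HotSpot types; not code from this commit):

// Simplified, standalone sketch of bitmap-driven pointer relocation.
// Names (PointerBitmap, relocate_embedded_pointers, translate) are
// illustrative only and do not exist in HotSpot.
#include <cstdint>
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

using Address = std::uintptr_t;

// One bit per pointer-sized word in the buffer; a set bit means
// "this word holds an address that must be translated".
using PointerBitmap = std::vector<bool>;

Address translate(const std::unordered_map<Address, Address>& map, Address old_p) {
  auto it = map.find(old_p);
  return it != map.end() ? it->second : old_p;   // leave unknown pointers alone
}

void relocate_embedded_pointers(char* buffered_obj,
                                const PointerBitmap& ptrmap,
                                std::size_t start_idx,
                                std::size_t end_idx,
                                const std::unordered_map<Address, Address>& map) {
  for (std::size_t bit = start_idx; bit < end_idx; bit++) {
    if (!ptrmap[bit]) continue;
    std::size_t field_offset = (bit - start_idx) * sizeof(Address);
    Address* ptr_loc = reinterpret_cast<Address*>(buffered_obj + field_offset);
    *ptr_loc = translate(map, *ptr_loc);         // rewrite the embedded pointer in place
  }
}

int main() {
  // Two-word "object": word 0 is plain data, word 1 is an embedded pointer.
  Address obj[2] = {42, 0x1000};
  PointerBitmap ptrmap = {false, true};          // only word 1 is a pointer
  std::unordered_map<Address, Address> map = {{0x1000, 0x2000}};
  relocate_embedded_pointers(reinterpret_cast<char*>(obj), ptrmap, 0, 2, map);
  std::cout << std::hex << obj[1] << "\n";       // prints 2000
}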
@@ -158,7 +158,7 @@ ArchiveBuilder::ArchiveBuilder() :
   _rw_src_objs(),
   _ro_src_objs(),
   _src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
-  _dumped_to_src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
+  _buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
   _total_closed_heap_region_size(0),
   _total_open_heap_region_size(0),
   _estimated_metaspaceobj_bytes(0),

@@ -632,10 +632,10 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
   memcpy(dest, src, bytes);
   {
     bool created;
-    _dumped_to_src_obj_table.put_if_absent((address)dest, src, &created);
+    _buffered_to_src_table.put_if_absent((address)dest, src, &created);
     assert(created, "must be");
-    if (_dumped_to_src_obj_table.maybe_grow()) {
-      log_info(cds, hashtables)("Expanded _dumped_to_src_obj_table table to %d", _dumped_to_src_obj_table.table_size());
+    if (_buffered_to_src_table.maybe_grow()) {
+      log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
     }
   }

@@ -646,23 +646,23 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
   }

   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
-  src_info->set_dumped_addr((address)dest);
+  src_info->set_buffered_addr((address)dest);

   _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
 }

-address ArchiveBuilder::get_dumped_addr(address src_obj) const {
-  SourceObjInfo* p = _src_obj_table.get(src_obj);
+address ArchiveBuilder::get_buffered_addr(address src_addr) const {
+  SourceObjInfo* p = _src_obj_table.get(src_addr);
   assert(p != NULL, "must be");

-  return p->dumped_addr();
+  return p->buffered_addr();
 }

-address ArchiveBuilder::get_src_obj(address dumped_addr) const {
-  assert(is_in_buffer_space(dumped_addr), "must be");
-  address* src_obj = _dumped_to_src_obj_table.get(dumped_addr);
-  assert(src_obj != NULL && *src_obj != NULL, "must be");
-  return *src_obj;
+address ArchiveBuilder::get_source_addr(address buffered_addr) const {
+  assert(is_in_buffer_space(buffered_addr), "must be");
+  address* src_p = _buffered_to_src_table.get(buffered_addr);
+  assert(src_p != NULL && *src_p != NULL, "must be");
+  return *src_p;
 }

 void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
@@ -676,7 +676,7 @@ void ArchiveBuilder::update_special_refs() {
     SpecialRefInfo s = _special_refs->at(i);
     size_t field_offset = s.field_offset();
     address src_obj = s.src_obj();
-    address dst_obj = get_dumped_addr(src_obj);
+    address dst_obj = get_buffered_addr(src_obj);
     intptr_t* src_p = (intptr_t*)(src_obj + field_offset);
     intptr_t* dst_p = (intptr_t*)(dst_obj + field_offset);
     assert(s.type() == MetaspaceClosure::_method_entry_ref, "only special type allowed for now");

@@ -694,7 +694,7 @@ public:

   virtual bool do_ref(Ref* ref, bool read_only) {
     if (ref->not_null()) {
-      ref->update(_builder->get_dumped_addr(ref->obj()));
+      ref->update(_builder->get_buffered_addr(ref->obj()));
       ArchivePtrMarker::mark_pointer(ref->addr());
     }
     return false; // Do not recurse.

@@ -829,11 +829,11 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
   return buffer_to_offset(p);
 }

-// Update a Java object to point its Klass* to the new location after
-// shared archive has been compacted.
-void ArchiveBuilder::relocate_klass_ptr(oop o) {
+// Update a Java object to point its Klass* to the address where
+// the class would be mapped at runtime.
+void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
   assert(DumpSharedSpaces, "sanity");
-  Klass* k = get_relocated_klass(o->klass());
+  Klass* k = get_buffered_klass(o->klass());
   Klass* requested_k = to_requested(k);
   narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
   o->set_narrow_klass(nk);
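relocate_klass_ptr_of_oop() takes the buffered copy of a Klass, shifts it to its "requested" run-time address, and stores a narrow (compressed) encoding of that address into the object header. A hedged, standalone sketch of that buffered-to-requested shift plus a narrow encoding against a base (hypothetical names, plain integers standing in for Klass*/narrowKlass; not the HotSpot implementation):

// Sketch of the "buffered address + delta == requested address" relation
// and a compressed-pointer style encoding relative to a base.
#include <cassert>
#include <cstdint>
#include <iostream>

using Address = std::uint64_t;

struct RequestedLayout {
  Address buffer_bottom;     // where the dump buffer lives while dumping
  Address requested_bottom;  // where the archive asks to be mapped (e.g. SharedBaseAddress)

  std::int64_t buffer_to_requested_delta() const {
    return static_cast<std::int64_t>(requested_bottom) -
           static_cast<std::int64_t>(buffer_bottom);
  }
  Address to_requested(Address buffered) const {
    return buffered + buffer_to_requested_delta();   // constant shift
  }
  // Narrow (32-bit) encoding of a requested address against the requested base,
  // in the spirit of compressed Klass pointers (shift/alignment omitted here).
  std::uint32_t encode_not_null(Address requested) const {
    assert(requested >= requested_bottom);
    std::uint64_t offset = requested - requested_bottom;
    assert(offset <= UINT32_MAX);
    return static_cast<std::uint32_t>(offset);
  }
};

int main() {
  RequestedLayout layout{0x7f0000000000ULL, 0x800000000ULL};
  Address buffered_klass = 0x7f0000001230ULL;
  Address requested_klass = layout.to_requested(buffered_klass);
  std::cout << std::hex << requested_klass << " "
            << layout.encode_not_null(requested_klass) << "\n";  // 800001230 1230
}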
@@ -981,8 +981,8 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
     Thread* current = Thread::current();
     for (int i = 0; i < src_objs->objs()->length(); i++) {
       SourceObjInfo* src_info = src_objs->at(i);
-      address src = src_info->orig_obj();
-      address dest = src_info->dumped_addr();
+      address src = src_info->source_addr();
+      address dest = src_info->buffered_addr();
       log_data(last_obj_base, dest, last_obj_base + buffer_to_runtime_delta());
       address runtime_dest = dest + buffer_to_runtime_delta();
       int bytes = src_info->size_in_bytes();

@@ -61,6 +61,34 @@ const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
 // [4] Copy symbol table, dictionary, etc, into the ro region
 // [5] Relocate all the pointers in rw/ro, so that the archive can be mapped to
 //     the "requested" location without runtime relocation. See relocate_to_requested()
+//
+// "source" vs "buffered" vs "requested"
+//
+// The ArchiveBuilder deals with three types of addresses.
+//
+// "source":    These are the addresses of objects created in step [1] above. They are the actual
+//              InstanceKlass*, Method*, etc, of the Java classes that are loaded for executing
+//              Java bytecodes in the JVM process that's dumping the CDS archive.
+//
+//              It may be necessary to continue Java execution after ArchiveBuilder is finished.
+//              Therefore, we don't modify any of the "source" objects.
+//
+// "buffered":  The "source" objects that are deemed archivable are copied into a temporary buffer.
+//              Objects in the buffer are modified in steps [2, 3, 4] (e.g., unshareable info is
+//              removed, pointers are relocated, etc) to prepare them to be loaded at runtime.
+//
+// "requested": These are the addresses where the "buffered" objects should be loaded at runtime.
+//              When the "buffered" objects are written into the archive file, their addresses
+//              are adjusted in step [5] such that the lowest of these objects would be mapped
+//              at SharedBaseAddress.
+//
+// Translation between "source" and "buffered" addresses is done with two hashtables:
+//     _src_obj_table          : "source"   -> "buffered"
+//     _buffered_to_src_table  : "buffered" -> "source"
+//
+// Translation between "buffered" and "requested" addresses is done with a simple shift:
+//     buffered_address + _buffer_to_requested_delta == requested_address
+//
 class ArchiveBuilder : public StackObj {
 protected:
   DumpRegion* _current_dump_space;
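The comment block added above documents the two hashtables and their directions. A minimal standalone sketch of the same two-table bookkeeping, populated when an object is shallow-copied into the buffer (illustrative names; HotSpot uses ResizeableResourceHashtable rather than std::unordered_map):

// Standalone sketch of the two-direction lookup kept by the builder:
// source address -> buffered copy, and buffered copy -> source address.
#include <cassert>
#include <cstring>
#include <unordered_map>
#include <vector>

class TinyArchiveBuilder {
  std::vector<char> _buffer;                                 // the dump buffer
  std::unordered_map<const void*, void*> _src_to_buf;        // "source"   -> "buffered"
  std::unordered_map<const void*, const void*> _buf_to_src;  // "buffered" -> "source"
 public:
  explicit TinyArchiveBuilder(std::size_t capacity) { _buffer.reserve(capacity); }

  // Shallow-copy a source object into the buffer and record both mappings.
  void* make_shallow_copy(const void* src, std::size_t bytes) {
    // Stay within the reserved capacity so earlier buffer pointers stay valid.
    assert(_buffer.size() + bytes <= _buffer.capacity());
    std::size_t old_size = _buffer.size();
    _buffer.resize(old_size + bytes);
    void* dest = _buffer.data() + old_size;
    std::memcpy(dest, src, bytes);
    bool created = _src_to_buf.emplace(src, dest).second;
    assert(created && "each source object is copied at most once");
    _buf_to_src.emplace(dest, src);
    return dest;
  }

  void* get_buffered_addr(const void* src) const {           // "source" -> "buffered"
    auto it = _src_to_buf.find(src);
    assert(it != _src_to_buf.end());
    return it->second;
  }
  const void* get_source_addr(const void* buffered) const {  // "buffered" -> "source"
    auto it = _buf_to_src.find(buffered);
    assert(it != _buf_to_src.end());
    return it->second;
  }
};

int main() {
  int source_obj = 17;
  TinyArchiveBuilder builder(1024);
  void* copy = builder.make_shallow_copy(&source_obj, sizeof(source_obj));
  assert(builder.get_buffered_addr(&source_obj) == copy);
  assert(builder.get_source_addr(copy) == &source_obj);
  return 0;
}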
@@ -112,37 +140,36 @@ private:
   };

   class SourceObjInfo {
-    MetaspaceClosure::Ref* _ref;
+    MetaspaceClosure::Ref* _ref;     // The object that's copied into the buffer
     uintx _ptrmap_start;             // The bit-offset of the start of this object (inclusive)
     uintx _ptrmap_end;               // The bit-offset of the end of this object (exclusive)
     bool _read_only;
     FollowMode _follow_mode;
     int _size_in_bytes;
     MetaspaceObj::Type _msotype;
-    address _dumped_addr;            // Address this->obj(), as used by the dumped archive.
-    address _orig_obj;               // The value of the original object (_ref->obj()) when this
+    address _source_addr;            // The value of the source object (_ref->obj()) when this
                                      // SourceObjInfo was created. Note that _ref->obj() may change
                                      // later if _ref is relocated.

+    address _buffered_addr;          // The copy of _ref->obj() inside the buffer.
   public:
     SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
       _ref(ref), _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _follow_mode(follow_mode),
       _size_in_bytes(ref->size() * BytesPerWord), _msotype(ref->msotype()),
-      _orig_obj(ref->obj()) {
+      _source_addr(ref->obj()) {
       if (follow_mode == point_to_it) {
-        _dumped_addr = ref->obj();
+        _buffered_addr = ref->obj();
       } else {
-        _dumped_addr = NULL;
+        _buffered_addr = NULL;
       }
     }

     bool should_copy() const { return _follow_mode == make_a_copy; }
     MetaspaceClosure::Ref* ref() const { return _ref; }
-    void set_dumped_addr(address dumped_addr) {
+    void set_buffered_addr(address addr) {
       assert(should_copy(), "must be");
-      assert(_dumped_addr == NULL, "cannot be copied twice");
-      assert(dumped_addr != NULL, "must be a valid copy");
-      _dumped_addr = dumped_addr;
+      assert(_buffered_addr == NULL, "cannot be copied twice");
+      assert(addr != NULL, "must be a valid copy");
+      _buffered_addr = addr;
     }
     void set_ptrmap_start(uintx v) { _ptrmap_start = v; }
     void set_ptrmap_end(uintx v)   { _ptrmap_end = v; }
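The renamed SourceObjInfo fields make the two roles explicit: _source_addr is the live object, _buffered_addr is its copy in the buffer (or the object itself when follow_mode is point_to_it), and the buffered address may be set at most once. A standalone sketch of such a record, with hypothetical names (not HotSpot code):

// Sketch of a per-object record tracking the source address and the
// set-at-most-once buffered address.
#include <cassert>

using Address = char*;

enum class FollowMode { make_a_copy, point_to_it };

class SourceObjRecord {
  Address _source_addr;    // the live object created while loading classes
  Address _buffered_addr;  // its copy inside the dump buffer (or the object itself)
  FollowMode _follow_mode;
 public:
  SourceObjRecord(Address source, FollowMode mode)
      : _source_addr(source),
        _buffered_addr(mode == FollowMode::point_to_it ? source : nullptr),
        _follow_mode(mode) {}

  bool should_copy() const { return _follow_mode == FollowMode::make_a_copy; }

  // The buffered address may be set exactly once, and only for copied objects.
  void set_buffered_addr(Address addr) {
    assert(should_copy());
    assert(_buffered_addr == nullptr && "cannot be copied twice");
    assert(addr != nullptr && "must be a valid copy");
    _buffered_addr = addr;
  }

  Address source_addr() const   { return _source_addr; }
  Address buffered_addr() const { return _buffered_addr; }
};

int main() {
  char obj[16] = {};
  char copy[16] = {};
  SourceObjRecord rec(obj, FollowMode::make_a_copy);
  rec.set_buffered_addr(copy);
  return rec.buffered_addr() == copy ? 0 : 1;
}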
@@ -150,8 +177,8 @@ private:
     uintx ptrmap_end()   const { return _ptrmap_end; }   // exclusive
     bool read_only()     const { return _read_only; }
     int size_in_bytes()  const { return _size_in_bytes; }
-    address orig_obj()    const { return _orig_obj; }
-    address dumped_addr() const { return _dumped_addr; }
+    address source_addr()   const { return _source_addr; }
+    address buffered_addr() const { return _buffered_addr; }
     MetaspaceObj::Type msotype() const { return _msotype; }

     // convenience accessor

@@ -200,7 +227,7 @@ private:
   SourceObjList _rw_src_objs;                 // objs to put in rw region
   SourceObjList _ro_src_objs;                 // objs to put in ro region
   ResizeableResourceHashtable<address, SourceObjInfo, ResourceObj::C_HEAP, mtClassShared> _src_obj_table;
-  ResizeableResourceHashtable<address, address, ResourceObj::C_HEAP, mtClassShared> _dumped_to_src_obj_table;
+  ResizeableResourceHashtable<address, address, ResourceObj::C_HEAP, mtClassShared> _buffered_to_src_table;
   GrowableArray<Klass*>* _klasses;
   GrowableArray<Symbol*>* _symbols;
   GrowableArray<SpecialRefInfo>* _special_refs;

@@ -384,16 +411,10 @@ public:
   void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
                     bool read_only, bool allow_exec);

-  // + When creating a CDS archive, we first load Java classes and create metadata
-  //   objects as usual. These are call "source" objects.
-  // + We then copy the source objects into the output buffer at "dumped addresses".
-  //
-  // The following functions translate between these two (non-overlapping) spaces.
-  // (The API should be renamed to be less confusing!)
-  address get_dumped_addr(address src_obj) const;
-  address get_src_obj(address dumped_addr) const;
-  template <typename T> T get_src_obj(T dumped_addr) const {
-    return (T)get_src_obj((address)dumped_addr);
+  address get_buffered_addr(address src_addr) const;
+  address get_source_addr(address buffered_addr) const;
+  template <typename T> T get_source_addr(T buffered_addr) const {
+    return (T)get_source_addr((address)buffered_addr);
   }

   // All klasses and symbols that will be copied into the archive
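The template overload added above is a thin typed wrapper around the untyped address-to-address lookup, so call sites keep their pointer types without casting. A minimal sketch of the same wrapper pattern, with hypothetical names and std::unordered_map standing in for the real table:

// Sketch of wrapping an untyped address translation in a typed template.
#include <cstdint>
#include <iostream>
#include <unordered_map>

using Address = std::uintptr_t;

class Translator {
  std::unordered_map<Address, Address> _map;   // buffered -> source
 public:
  void record(const void* buffered, const void* source) {
    _map[reinterpret_cast<Address>(buffered)] = reinterpret_cast<Address>(source);
  }
  // Untyped core lookup.
  Address get_source_addr(Address buffered) const { return _map.at(buffered); }
  // Typed convenience wrapper, mirroring the template overload in the diff.
  template <typename T>
  T* get_source_addr(T* buffered) const {
    return reinterpret_cast<T*>(get_source_addr(reinterpret_cast<Address>(buffered)));
  }
};

int main() {
  int source_obj = 7;
  int buffered_copy = source_obj;                   // pretend this is the copy in the buffer
  Translator t;
  t.record(&buffered_copy, &source_obj);
  int* back = t.get_source_addr(&buffered_copy);    // typed call site, no casts needed
  std::cout << (back == &source_obj) << "\n";       // prints 1
}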
@@ -422,16 +443,16 @@ public:
     return alloc_stats()->string_stats();
   }

-  void relocate_klass_ptr(oop o);
+  void relocate_klass_ptr_of_oop(oop o);

-  static Klass* get_relocated_klass(Klass* orig_klass) {
-    Klass* klass = (Klass*)current()->get_dumped_addr((address)orig_klass);
+  static Klass* get_buffered_klass(Klass* src_klass) {
+    Klass* klass = (Klass*)current()->get_buffered_addr((address)src_klass);
     assert(klass != NULL && klass->is_klass(), "must be");
     return klass;
   }

-  static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
-    return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
+  static Symbol* get_buffered_symbol(Symbol* src_symbol) {
+    return (Symbol*)current()->get_buffered_addr((address)src_symbol);
   }

   void print_stats();

@@ -58,10 +58,6 @@ public:
     ArchivePtrMarker::mark_pointer(ptr_loc);
   }

-  template <typename T> T get_dumped_addr(T obj) {
-    return (T)ArchiveBuilder::get_dumped_addr((address)obj);
-  }
-
   static int dynamic_dump_method_comparator(Method* a, Method* b) {
     Symbol* a_name = a->name();
     Symbol* b_name = b->name();

@@ -340,7 +340,7 @@ void HeapShared::archive_klass_objects() {
   GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
   assert(klasses != NULL, "sanity");
   for (int i = 0; i < klasses->length(); i++) {
-    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
+    Klass* k = ArchiveBuilder::get_buffered_klass(klasses->at(i));

     // archive mirror object
     java_lang_Class::archive_mirror(k);

@@ -377,7 +377,7 @@ void HeapShared::check_enum_obj(int level,
                                 oop orig_obj,
                                 bool is_closed_archive) {
   Klass* k = orig_obj->klass();
-  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
+  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
   if (!k->is_instance_klass()) {
     return;
   }

@@ -385,7 +385,7 @@ void HeapShared::check_enum_obj(int level,
   if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
     ResourceMark rm;
     ik->set_has_archived_enum_objs();
-    relocated_k->set_has_archived_enum_objs();
+    buffered_k->set_has_archived_enum_objs();
     oop mirror = ik->java_mirror();

     for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
@@ -567,14 +567,14 @@ HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info
 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

 // Get the subgraph_info for Klass k. A new subgraph_info is created if
-// there is no existing one for k. The subgraph_info records the relocated
-// Klass* of the original k.
+// there is no existing one for k. The subgraph_info records the "buffered"
+// address of the class.
 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
   assert(DumpSharedSpaces, "dump time only");
   bool created;
-  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
+  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
   KlassSubGraphInfo* info =
-    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
+    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                   &created);
   assert(created, "must not initialize twice");
   return info;
@@ -603,23 +603,23 @@ void KlassSubGraphInfo::add_subgraph_entry_field(
 // Only objects of boot classes can be included in sub-graph.
 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
   assert(DumpSharedSpaces, "dump time only");
-  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);
+  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

   if (_subgraph_object_klasses == NULL) {
     _subgraph_object_klasses =
       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
   }

-  assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");
+  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

-  if (_k == relocated_k) {
+  if (_k == buffered_k) {
     // Don't add the Klass containing the sub-graph to it's own klass
     // initialization list.
     return;
   }

-  if (relocated_k->is_instance_klass()) {
-    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
+  if (buffered_k->is_instance_klass()) {
+    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
            "must be boot class");
     // vmClasses::xxx_klass() are not updated, need to check
     // the original Klass*
@@ -630,32 +630,32 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
       return;
     }
     check_allowed_klass(InstanceKlass::cast(orig_k));
-  } else if (relocated_k->is_objArray_klass()) {
-    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
+  } else if (buffered_k->is_objArray_klass()) {
+    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
     if (abk->is_instance_klass()) {
       assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
              "must be boot class");
       check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
     }
-    if (relocated_k == Universe::objectArrayKlassObj()) {
+    if (buffered_k == Universe::objectArrayKlassObj()) {
       // Initialized early during Universe::genesis. No need to be added
       // to the list.
       return;
     }
   } else {
-    assert(relocated_k->is_typeArray_klass(), "must be");
+    assert(buffered_k->is_typeArray_klass(), "must be");
     // Primitive type arrays are created early during Universe::genesis.
     return;
   }

   if (log_is_enabled(Debug, cds, heap)) {
-    if (!_subgraph_object_klasses->contains(relocated_k)) {
+    if (!_subgraph_object_klasses->contains(buffered_k)) {
       ResourceMark rm;
       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
     }
   }

-  _subgraph_object_klasses->append_if_missing(relocated_k);
+  _subgraph_object_klasses->append_if_missing(buffered_k);
   _has_non_early_klasses |= is_non_early_klass(orig_k);
 }

@@ -767,8 +767,8 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
       (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
     record->init(&info);

-    Klass* relocated_k = ArchiveBuilder::get_relocated_klass(klass);
-    unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)relocated_k);
+    Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
+    unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
     u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
     _writer->add(hash, delta);
   }
@@ -1786,7 +1786,7 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
     o->oop_iterate(&finder);
     p += o->size();
     if (DumpSharedSpaces) {
-      builder->relocate_klass_ptr(o);
+      builder->relocate_klass_ptr_of_oop(o);
     }
     ++ num_objs;
   }

@@ -1120,7 +1120,7 @@ class ResetMirrorField: public FieldClosure {
 static void set_klass_field_in_archived_mirror(oop mirror_obj, int offset, Klass* k) {
   assert(java_lang_Class::is_instance(mirror_obj), "must be");
   // this is the copy of k in the output buffer
-  Klass* copy = ArchiveBuilder::get_relocated_klass(k);
+  Klass* copy = ArchiveBuilder::get_buffered_klass(k);

   // This is the address of k, if the archive is loaded at the requested location
   Klass* def = ArchiveBuilder::current()->to_requested(copy);

@@ -450,15 +450,15 @@ void ModuleEntry::init_as_archived_entry() {
   _loader_data = NULL;  // re-init at runtime
   _shared_path_index = FileMapInfo::get_module_shared_path_index(_location);
   if (name() != NULL) {
-    _name = ArchiveBuilder::get_relocated_symbol(_name);
+    _name = ArchiveBuilder::get_buffered_symbol(_name);
     ArchivePtrMarker::mark_pointer((address*)&_name);
   }
   _reads = (GrowableArray<ModuleEntry*>*)archived_reads;
   if (_version != NULL) {
-    _version = ArchiveBuilder::get_relocated_symbol(_version);
+    _version = ArchiveBuilder::get_buffered_symbol(_version);
   }
   if (_location != NULL) {
-    _location = ArchiveBuilder::get_relocated_symbol(_location);
+    _location = ArchiveBuilder::get_buffered_symbol(_location);
   }
   JFR_ONLY(set_trace_id(0));  // re-init at runtime

@@ -248,7 +248,7 @@ void PackageEntry::iterate_symbols(MetaspaceClosure* closure) {
 void PackageEntry::init_as_archived_entry() {
   Array<ModuleEntry*>* archived_qualified_exports = ModuleEntry::write_growable_array(_qualified_exports);

-  _name = ArchiveBuilder::get_relocated_symbol(_name);
+  _name = ArchiveBuilder::get_buffered_symbol(_name);
   _module = ModuleEntry::get_archived_entry(_module);
   _qualified_exports = (GrowableArray<ModuleEntry*>*)archived_qualified_exports;
   _defined_by_cds_in_class_path = 0;

@@ -629,7 +629,7 @@ void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
   ArchiveBuilder* builder = ArchiveBuilder::current();
   int len = symbols->length();
   for (int i = 0; i < len; i++) {
-    Symbol* sym = ArchiveBuilder::get_relocated_symbol(symbols->at(i));
+    Symbol* sym = ArchiveBuilder::get_buffered_symbol(symbols->at(i));
     unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
     assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
            "must not rehash during dumping");