8247912: Make narrowOop a scoped enum
Reviewed-by: iklam, stefank
parent 928da494a8
commit 2d9fa9da02
Changed files:
src/hotspot
  cpu
    aarch64
    ppc
    s390
    x86
  share
    classfile
    gc
    jfr/leakprofiler/chains
    memory
    oops
      compressedOops.hpp
      compressedOops.inline.hpp
      cpCache.inline.hpp
      klass.cpp
      klass.hpp
      oop.cpp
      oopsHierarchy.hpp
    runtime
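
The heart of the patch is the oopsHierarchy.hpp hunk near the end: narrowOop changes from "typedef juint narrowOop;" to "enum class narrowOop : uint32_t { null = 0 };", so every former implicit integer conversion has to go through CompressedOops::narrow_oop_value() / narrow_oop_cast() or compare against narrowOop::null. A minimal standalone sketch (not part of the patch; the free functions below are simplified stand-ins for the CompressedOops helpers) of what the scoped enum enforces:

#include <cstdint>

// Stand-in for the new declaration in oopsHierarchy.hpp.
enum class narrowOop : uint32_t { null = 0 };

// Simplified stand-ins for CompressedOops::narrow_oop_value / narrow_oop_cast.
static inline uint32_t narrow_oop_value(narrowOop o) {
  return static_cast<uint32_t>(o);   // explicit conversion required
}

static inline narrowOop narrow_oop_cast(uint32_t i) {
  return static_cast<narrowOop>(i);  // explicit conversion required
}

int main() {
  narrowOop n = narrow_oop_cast(42u);
  uint32_t raw = narrow_oop_value(n);

  // With the old typedef both of the following compiled silently;
  // with the scoped enum they are rejected at compile time:
  //   uint32_t x = n;     // no implicit conversion to integer
  //   narrowOop m = 42u;  // no implicit conversion from integer
  return raw == 42u ? 0 : 1;
}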
@@ -175,7 +175,7 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
     // instruction.
     if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
       // Move narrow OOP
-      narrowOop n = CompressedOops::encode((oop)o);
+      uint32_t n = CompressedOops::narrow_oop_value((oop)o);
       Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
       Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
       instructions = 2;
@@ -220,8 +220,9 @@ address MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop
   }
   assert(inst1_found, "inst is not lis");
 
-  int xc = (data >> 16) & 0xffff;
-  int xd = (data >> 0) & 0xffff;
+  uint32_t data_value = CompressedOops::narrow_oop_value(data);
+  int xc = (data_value >> 16) & 0xffff;
+  int xd = (data_value >> 0) & 0xffff;
 
   set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
   set_imm((int *)inst2_addr, (xd)); // unsigned int
@@ -254,7 +255,7 @@ narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
   uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
   uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);
 
-  return (int) (xl | xh);
+  return CompressedOops::narrow_oop_cast(xl | xh);
 }
 #endif // _LP64
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -196,7 +196,7 @@ intptr_t NativeMovConstReg::data() const {
 
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
-    narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
+    narrowOop no = MacroAssembler::get_narrow_oop(addr, cb->content_begin());
     return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
@@ -293,10 +293,11 @@ void NativeMovConstReg::set_data(intptr_t data) {
 void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
   address inst2_addr = addr_at(0);
   CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
-  if (MacroAssembler::get_narrow_oop(inst2_addr, cb->content_begin()) == (long)data)
+  if (MacroAssembler::get_narrow_oop(inst2_addr, cb->content_begin()) == data) {
     return;
+  }
   const address inst1_addr =
-    MacroAssembler::patch_set_narrow_oop(inst2_addr, cb->content_begin(), (long)data);
+    MacroAssembler::patch_set_narrow_oop(inst2_addr, cb->content_begin(), data);
   assert(inst1_addr != NULL && inst1_addr < inst2_addr, "first instruction must be found");
   const int range = inst2_addr - inst1_addr + BytesPerInstWord;
   ICache::ppc64_flush_icache_bytes(inst1_addr, range);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -58,7 +58,9 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-        CompressedOops::encode((oop)x) : CompressedKlassPointers::encode((Klass*)x);
+        CompressedOops::encode((oop)x) :
+        // Type punning compressed klass pointer as narrowOop.
+        CompressedOops::narrow_oop_cast(CompressedKlassPointers::encode((Klass*)x));
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
@@ -1163,7 +1163,7 @@ void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend)
 // Load narrow oop constant, no decompression.
 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
   assert(UseCompressedOops, "must be on to call this method");
-  load_const_32to64(t, a, false /*sign_extend*/);
+  load_const_32to64(t, CompressedOops::narrow_oop_value(a), false /*sign_extend*/);
 }
 
 // Load narrow klass constant, compression required.
@@ -1181,7 +1181,7 @@ void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
   assert(UseCompressedOops, "must be on to call this method");
 
-  Assembler::z_clfi(oop1, oop2);
+  Assembler::z_clfi(oop1, CompressedOops::narrow_oop_value(oop2));
 }
 
 // Compare narrow oop in reg with narrow oop constant, no decompression.
@@ -1273,9 +1273,7 @@ int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
 // The passed ptr must NOT be in compressed format!
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
-
-  narrowOop no = CompressedOops::encode(o);
-  return patch_load_const_32to64(pos, no);
+  return patch_load_const_32to64(pos, CompressedOops::narrow_oop_value(o));
 }
 
 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
@@ -1291,9 +1289,7 @@ int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
 // The passed ptr must NOT be in compressed format!
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
-
-  narrowOop no = CompressedOops::encode(o);
-  return patch_compare_immediate_32(pos, no);
+  return patch_compare_immediate_32(pos, CompressedOops::narrow_oop_value(o));
 }
 
 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
@@ -52,12 +52,13 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     address disp = Assembler::locate_operand(addr(), which);
     // both compressed oops and compressed classes look the same
     if (CompressedOops::is_in((void*)x)) {
+      uint32_t encoded = CompressedOops::narrow_oop_value((oop)x);
       if (verify_only) {
-        guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
+        guarantee(*(uint32_t*) disp == encoded, "instructions must match");
       } else {
-        *(int32_t*) disp = CompressedOops::encode((oop)x);
+        *(int32_t*) disp = encoded;
       }
     } else {
       if (verify_only) {
        guarantee(*(uint32_t*) disp == CompressedKlassPointers::encode((Klass*)x), "instructions must match");
       } else {
@@ -67,7 +67,7 @@ const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;
 #if INCLUDE_CDS_JAVA_HEAP
 inline oop read_string_from_compact_hashtable(address base_address, u4 offset) {
   assert(sizeof(narrowOop) == sizeof(offset), "must be");
-  narrowOop v = (narrowOop)offset;
+  narrowOop v = CompressedOops::narrow_oop_cast(offset);
   return HeapShared::decode_from_archive(v);
 }
 
@@ -750,7 +750,7 @@ public:
     }
 
     // add to the compact table
-    _writer->add(hash, CompressedOops::encode(new_s));
+    _writer->add(hash, CompressedOops::narrow_oop_value(new_s));
     return true;
   }
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,11 +70,12 @@ bool BlockLocationPrinter<CollectedHeapT>::print_location(outputStream* st, void
   // Compressed oop needs to be decoded first.
 #ifdef _LP64
   if (UseCompressedOops && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
-    narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
+    narrowOop narrow_oop = CompressedOops::narrow_oop_cast((uintptr_t)addr);
     oop o = CompressedOops::decode_raw(narrow_oop);
 
     if (is_valid_obj(o)) {
-      st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
+      st->print(UINT32_FORMAT " is a compressed pointer to object: ",
+                CompressedOops::narrow_oop_value(narrow_oop));
       o->print_on(st);
       return true;
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@ class ReferenceProcessorPhaseTimes;
 // List of discovered references.
 class DiscoveredList {
 public:
-  DiscoveredList() : _oop_head(NULL), _compressed_head(0), _len(0) { }
+  DiscoveredList() : _oop_head(NULL), _compressed_head(narrowOop::null), _len(0) { }
   inline oop head() const;
   HeapWord* adr_head() {
     return UseCompressedOops ? (HeapWord*)&_compressed_head :
@@ -149,14 +149,14 @@ inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
 inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
   narrowOop val = CompressedOops::encode(n);
-  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, c, val));
+  return CompressedOops::decode(Atomic::cmpxchg(addr, c, val));
 }
 
 inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
   assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
   narrowOop cmp = CompressedOops::encode(c);
   narrowOop val = CompressedOops::encode(n);
-  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, cmp, val));
+  return CompressedOops::decode(Atomic::cmpxchg(addr, cmp, val));
 }
 
 template <class T>
@@ -57,7 +57,7 @@ template <typename Delegate>
 void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
-  if (*ref != 0) {
+  if (!CompressedOops::is_null(*ref)) {
     _delegate->do_root(UnifiedOopRef::encode_in_native(ref));
   }
 }
@@ -271,8 +271,8 @@ void ReadClosure::do_tag(int tag) {
 }
 
 void ReadClosure::do_oop(oop *p) {
-  narrowOop o = (narrowOop)nextPtr();
-  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
+  narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
+  if (CompressedOops::is_null(o) || !HeapShared::open_archive_heap_region_mapped()) {
     *p = NULL;
   } else {
     assert(HeapShared::is_heap_object_archiving_allowed(),
@@ -1663,8 +1663,7 @@ size_t FileMapInfo::read_bytes(void* buffer, size_t count) {
 
 address FileMapInfo::decode_start_address(FileMapRegion* spc, bool with_current_oop_encoding_mode) {
   size_t offset = spc->mapping_offset();
-  assert(offset == (size_t)(uint32_t)offset, "must be 32-bit only");
-  uint n = (uint)offset;
+  narrowOop n = CompressedOops::narrow_oop_cast(offset);
   if (with_current_oop_encoding_mode) {
     return cast_from_oop<address>(CompressedOops::decode_not_null(n));
   } else {
@@ -383,7 +383,7 @@ void KlassSubGraphInfo::add_subgraph_entry_field(
       new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
   }
   _subgraph_entry_fields->append((juint)static_field_offset);
-  _subgraph_entry_fields->append(CompressedOops::encode(v));
+  _subgraph_entry_fields->append(CompressedOops::narrow_oop_value(v));
   _subgraph_entry_fields->append(is_closed_archive ? 1 : 0);
 }
 
@@ -591,7 +591,7 @@ void HeapShared::initialize_from_archived_subgraph(Klass* k, TRAPS) {
     assert(efr_len % 3 == 0, "sanity");
     for (i = 0; i < efr_len;) {
       int field_offset = entry_field_records->at(i);
-      narrowOop nv = entry_field_records->at(i+1);
+      narrowOop nv = CompressedOops::narrow_oop_cast(entry_field_records->at(i+1));
       int is_closed_archive = entry_field_records->at(i+2);
       oop v;
       if (is_closed_archive == 0) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "memory/memRegion.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include <type_traits>
 
 class outputStream;
 class ReservedHeapSpace;
@@ -76,6 +77,10 @@ public:
     AnyNarrowOopMode = 4
   };
 
+  // The representation type for narrowOop is assumed to be uint32_t.
+  static_assert(std::is_same<uint32_t, std::underlying_type_t<narrowOop>>::value,
+                "narrowOop has unexpected representation type");
+
   static void initialize(const ReservedHeapSpace& heap_space);
 
   static void set_base(address base);
@@ -112,7 +117,7 @@ public:
   static void print_mode(outputStream* st);
 
   static bool is_null(oop v) { return v == NULL; }
-  static bool is_null(narrowOop v) { return v == 0; }
+  static bool is_null(narrowOop v) { return v == narrowOop::null; }
 
   static inline oop decode_raw(narrowOop v);
   static inline oop decode_not_null(narrowOop v);
@@ -125,6 +130,12 @@ public:
   static oop decode(oop v) { return v; }
   static narrowOop encode_not_null(narrowOop v) { return v; }
   static narrowOop encode(narrowOop v) { return v; }
+
+  static inline uint32_t narrow_oop_value(oop o);
+  static inline uint32_t narrow_oop_value(narrowOop o);
+
+  template<typename T>
+  static inline narrowOop narrow_oop_cast(T i);
 };
 
 // For UseCompressedClassPointers.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,14 +62,30 @@ inline narrowOop CompressedOops::encode_not_null(oop v) {
   assert(is_in(v), "address not in heap range: " PTR_FORMAT, p2i((void*)v));
   uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base(), 1));
   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
-  uint64_t result = pd >> shift();
-  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
+  narrowOop result = narrow_oop_cast(pd >> shift());
   assert(decode(result) == v, "reversibility");
-  return (narrowOop)result;
+  return result;
 }
 
 inline narrowOop CompressedOops::encode(oop v) {
-  return is_null(v) ? (narrowOop)0 : encode_not_null(v);
+  return is_null(v) ? narrowOop::null : encode_not_null(v);
 }
 
+inline uint32_t CompressedOops::narrow_oop_value(oop o) {
+  return narrow_oop_value(encode(o));
+}
+
+inline uint32_t CompressedOops::narrow_oop_value(narrowOop o) {
+  return static_cast<uint32_t>(o);
+}
+
+template<typename T>
+inline narrowOop CompressedOops::narrow_oop_cast(T i) {
+  static_assert(std::is_integral<T>::value, "precondition");
+  uint32_t narrow_value = static_cast<uint32_t>(i);
+  // Ensure no bits lost in conversion to uint32_t.
+  assert(i == static_cast<T>(narrow_value), "narrowOop overflow");
+  return static_cast<narrowOop>(narrow_value);
+}
+
 static inline bool check_alignment(Klass* v) {
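
For reference, the range check that used to sit inline in encode_not_null (the CONST64(0xffffffff00000000) assert removed above) now lives in narrow_oop_cast. A standalone sketch that reproduces the template outside HotSpot, just to show the conversion and the debug-build overflow assert (names mirror the patch, but this code is illustrative only):

#include <cassert>
#include <cstdint>
#include <type_traits>

enum class narrowOop : uint32_t { null = 0 };

// Same shape as the CompressedOops::narrow_oop_cast added above, reproduced
// standalone so the overflow check can be exercised in isolation.
template <typename T>
narrowOop narrow_oop_cast(T i) {
  static_assert(std::is_integral<T>::value, "precondition");
  uint32_t narrow_value = static_cast<uint32_t>(i);
  // Ensure no bits are lost in the conversion to uint32_t.
  assert(i == static_cast<T>(narrow_value) && "narrowOop overflow");
  return static_cast<narrowOop>(narrow_value);
}

int main() {
  // A shifted pointer delta that fits in 32 bits converts cleanly...
  uint64_t pd_shifted = 0x7fffffffu;
  narrowOop ok = narrow_oop_cast(pd_shifted);

  // ...while a value with high bits set would trip the assert in a debug
  // build (left commented out so this example runs to completion):
  //   narrow_oop_cast(uint64_t(1) << 40);

  return ok == narrowOop::null ? 1 : 0;
}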
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@ inline ConstantPoolCache::ConstantPoolCache(int length,
                                             const intStack& invokedynamic_references_map) :
   _length(length),
   _constant_pool(NULL) {
-  CDS_JAVA_HEAP_ONLY(_archived_references = 0;)
+  CDS_JAVA_HEAP_ONLY(_archived_references = narrowOop::null;)
   initialize(inverse_index_map, invokedynamic_inverse_index_map,
              invokedynamic_references_map);
   for (int i = 0; i < length; i++) {
@@ -204,7 +204,7 @@ Klass::Klass(KlassID id) : _id(id),
                            _prototype_header(markWord::prototype()),
                            _shared_class_path_index(-1) {
   CDS_ONLY(_shared_class_flags = 0;)
-  CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;)
+  CDS_JAVA_HEAP_ONLY(_archived_mirror = narrowOop::null;)
   _primary_supers[0] = this;
   set_super_check_offset(in_bytes(primary_supers_offset()));
 }
@@ -263,7 +263,7 @@ protected:
   void set_java_mirror(Handle m);
 
   oop archived_java_mirror_raw() NOT_CDS_JAVA_HEAP_RETURN_(NULL); // no GC barrier
-  narrowOop archived_java_mirror_raw_narrow() NOT_CDS_JAVA_HEAP_RETURN_(0); // no GC barrier
+  narrowOop archived_java_mirror_raw_narrow() NOT_CDS_JAVA_HEAP_RETURN_(narrowOop::null); // no GC barrier
   void set_archived_java_mirror_raw(oop m) NOT_CDS_JAVA_HEAP_RETURN; // no GC barrier
 
   // Temporary mirror switch used by RedefineClasses
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@ void* oopDesc::load_oop_raw(oop obj, int offset) {
   uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset;
   if (UseCompressedOops) {
     narrowOop narrow_oop = *(narrowOop*)addr;
-    if (narrow_oop == 0) return NULL;
+    if (CompressedOops::is_null(narrow_oop)) return NULL;
     return (void*)CompressedOops::decode_raw(narrow_oop);
   } else {
     return *(void**)addr;
@@ -33,7 +33,8 @@
 // This hierarchy is a representation hierarchy, i.e. if A is a superclass
 // of B, A's representation is a prefix of B's representation.
 
-typedef juint narrowOop; // Offset instead of address for an oop within a java object
+// Global offset instead of address for an oop within a java object.
+enum class narrowOop : uint32_t { null = 0 };
 
 // If compressed klass pointers then use narrowKlass.
 typedef juint narrowKlass;
@@ -98,8 +98,12 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
           // The callee has no clue whether the register holds an int,
           // long or is unused. He always saves a long. Here we know
           // a long was saved, but we only want an int back. Narrow the
-          // saved long to the int that the JVM wants.
-          value.noop = (narrowOop) *(julong*) value_addr;
+          // saved long to the int that the JVM wants. We can't just
+          // use narrow_oop_cast directly, because we don't know what
+          // the high bits of the value might be.
+          static_assert(sizeof(narrowOop) == sizeof(juint), "size mismatch");
+          juint narrow_value = (juint) *(julong*)value_addr;
+          value.noop = CompressedOops::narrow_oop_cast(narrow_value);
         } else {
           value.noop = *(narrowOop*) value_addr;
         }
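
The stackValue.cpp hunk above also shows why narrow_oop_cast is not applied to the saved slot directly: the slot is a full 64-bit register value whose upper half may contain garbage, and the cast's overflow assert would fire on it, so the value is narrowed to juint first. An illustrative standalone sketch of that two-step narrowing (the helper below is a simplified stand-in, not the HotSpot one):

#include <cassert>
#include <cstdint>

enum class narrowOop : uint32_t { null = 0 };

// Simplified stand-in for CompressedOops::narrow_oop_cast (see the
// compressedOops.inline.hpp hunk earlier in this diff).
template <typename T>
narrowOop narrow_oop_cast(T i) {
  uint32_t narrow_value = static_cast<uint32_t>(i);
  assert(i == static_cast<T>(narrow_value) && "narrowOop overflow");
  return static_cast<narrowOop>(narrow_value);
}

int main() {
  // A callee-saved 64-bit slot: the low 32 bits hold the narrow oop value,
  // the high bits are whatever the callee happened to leave there.
  uint64_t saved_long = 0xdeadbeef00000005ULL;

  // Passing saved_long straight to narrow_oop_cast would fire the overflow
  // assert in a debug build, so the value is masked to 32 bits first:
  uint32_t narrow_value = static_cast<uint32_t>(saved_long);
  narrowOop noop = narrow_oop_cast(narrow_value);

  return noop == static_cast<narrowOop>(5u) ? 0 : 1;
}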