8308396: Fix offset_of conversion warnings in runtime code

Reviewed-by: amitkumar, jsjolen, fparain
Coleen Phillimore 2023-05-19 17:16:04 +00:00
parent a5343fa605
commit 265f40b4f7
85 changed files with 438 additions and 436 deletions
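
The warnings in question come from offset accessors like ObjectMonitor::owner_offset_in_bytes() returning a plain int, which then mixes implicitly with other integer types at call sites. The change replaces them with accessors returning the strongly typed ByteSize and converts explicitly via in_bytes() only where a raw integer is genuinely needed. A minimal sketch of the pattern, with a simplified ByteSize and an illustrative layout (HotSpot's real definitions live in utilities/sizes.hpp and use an offset_of macro rather than offsetof):

// Simplified ByteSize: a strongly typed byte offset. Illustrative only.
#include <cassert>
#include <cstddef>

enum class ByteSize : int {};
constexpr ByteSize in_ByteSize(int s)   { return static_cast<ByteSize>(s); }
constexpr int      in_bytes(ByteSize x) { return static_cast<int>(x); }

struct BasicLock       { void* _displaced_header; };
struct BasicObjectLock {
  BasicLock _lock;
  void*     _obj;
  // Old style: a raw int that converts silently at every call site.
  static int obj_offset_in_bytes() { return (int) offsetof(BasicObjectLock, _obj); }
  // New style: ByteSize; call sites that need an int say in_bytes(...).
  static ByteSize obj_offset() { return in_ByteSize((int) offsetof(BasicObjectLock, _obj)); }
};

int main() {
  const int obj_offset = in_bytes(BasicObjectLock::obj_offset()); // explicit conversion
  assert(obj_offset == BasicObjectLock::obj_offset_in_bytes());
  return 0;
}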

View File

@ -3884,7 +3884,7 @@ encode %{
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
__ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value));
__ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
__ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, rscratch1); // Sets flags for result
@ -3902,7 +3902,7 @@ encode %{
__ br(Assembler::NE, cont); // Check for recursive locking
// Recursive lock case
__ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1);
__ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
// flag == EQ still from the cmp above, checking if this is a reentrant lock
__ bind(cont);
@ -3968,7 +3968,7 @@ encode %{
if (LockingMode == LM_LIGHTWEIGHT) {
// If the owner is anonymous, we need to fix it -- in an outline stub.
Register tmp2 = disp_hdr;
__ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
// We cannot use tbnz here, the target might be too far away and cannot
// be encoded.
__ tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
@ -3978,25 +3978,25 @@ encode %{
__ bind(stub->continuation());
}
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
Label notRecursive;
__ cbz(disp_hdr, notRecursive);
// Recursive lock
__ sub(disp_hdr, disp_hdr, 1u);
__ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
__ cmp(disp_hdr, disp_hdr); // Sets flags for result
__ b(cont);
__ bind(notRecursive);
__ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
__ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset()));
__ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
__ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
__ cmp(rscratch1, zr); // Sets flags for result
__ cbnz(rscratch1, cont);
// need a release store here
__ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ lea(tmp, Address(tmp, ObjectMonitor::owner_offset()));
__ stlr(zr, tmp); // set unowned
__ bind(cont);
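
Throughout the fast_lock/fast_unlock path above, disp_hdr holds a tagged ObjectMonitor pointer: for an inflated lock the mark word carries the monitor address with markWord::monitor_value in its low bits. Folding the tag into the displacement, in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value, yields the untagged field address in a single add. A sketch of that arithmetic with a stand-in monitor layout (monitor_value is 2, per markWord.hpp):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const uintptr_t monitor_value = 2;  // low-bits tag marking "inflated"

struct MonitorSketch {   // stand-in for ObjectMonitor; layout illustrative
  void* _header;
  void* _owner;          // field the generated code addresses
};

int main() {
  MonitorSketch m{};
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&m) | monitor_value;
  // Mirrors: add(tmp, disp_hdr, in_bytes(owner_offset()) - monitor_value)
  uintptr_t owner_addr = tagged + offsetof(MonitorSketch, _owner) - monitor_value;
  assert(owner_addr == reinterpret_cast<uintptr_t>(&m._owner));
  return 0;
}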

View File

@ -69,7 +69,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
verify_oop(obj);
// save object being locked into the BasicObjectLock
str(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
null_check_offset = offset();
@ -140,7 +140,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
}
// load object
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {

View File

@ -78,7 +78,7 @@ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
assert(t != noreg, "need tmp register");
// Fix owner to be the current thread.
__ str(rthread, Address(mon, ObjectMonitor::owner_offset_in_bytes()));
__ str(rthread, Address(mon, ObjectMonitor::owner_offset()));
// Pop owner object from lock-stack.
__ ldrw(t, Address(rthread, JavaThread::lock_stack_top_offset()));

View File

@ -276,8 +276,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// load pointer for resolved_references[] objArray
ldr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
ldr(result, Address(result, ConstantPool::cache_offset()));
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
resolve_oop_handle(result, tmp, rscratch2);
// Add in the index
add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
@ -288,7 +288,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register cpool, Register index, Register klass, Register temp) {
add(temp, cpool, index, LSL, LogBytesPerWord);
ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
add(klass, klass, temp, LSL, LogBytesPerWord);
ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
@ -589,7 +589,7 @@ void InterpreterMacroAssembler::remove_activation(
// register for unlock_object to pass to VM directly
lea(c_rarg1, monitor); // address of first monitor
ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
cbnz(r0, unlock);
pop(state);
@ -666,7 +666,7 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop);
// check if current entry is used
ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
cbnz(rscratch1, exception);
add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
@ -741,8 +741,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
@ -864,14 +864,14 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
if (LockingMode != LM_LIGHTWEIGHT) {
// Convert from BasicObjectLock structure to object and BasicLock
// structure. Store the BasicLock address into %r0
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
}
// Load oop into obj_reg(%c_rarg3)
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
// Free entry
str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
if (LockingMode == LM_LIGHTWEIGHT) {
Label slow_case;
@ -906,7 +906,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
}
// Call the runtime routine for slow case.
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
b(done);
@ -1805,7 +1805,7 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
cmp(rscratch1, (u1)Bytecodes::_invokehandle);
br(Assembler::EQ, do_profile);
get_method(tmp);
ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
br(Assembler::NE, profile_continue);
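
lock_object() above builds mark_offset as lock_offset plus BasicLock::displaced_header_offset_in_bytes(): the BasicObjectLock embeds a BasicLock, and the displaced mark word lives inside that embedded lock. A small sketch that checks the composed offset against a real address, using a stand-in layout:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct BasicLock       { uintptr_t _displaced_header; };
struct BasicObjectLock { BasicLock _lock; void* _obj; };   // stand-in layout

int main() {
  const int lock_offset = (int) offsetof(BasicObjectLock, _lock);
  const int mark_offset = lock_offset + (int) offsetof(BasicLock, _displaced_header);
  BasicObjectLock bol{};
  char* base = reinterpret_cast<char*>(&bol);
  // The composed offset addresses the displaced header inside the embedded lock.
  assert(base + mark_offset == reinterpret_cast<char*>(&bol._lock._displaced_header));
  return 0;
}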

View File

@ -129,12 +129,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
ldr(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
ldr(reg, Address(reg, ConstantPool::cache_offset()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
ldr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
ldr(tags, Address(cpool, ConstantPool::tags_offset()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);

View File

@ -1143,7 +1143,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int itentry_off = in_bytes(itableMethodEntry::method_offset());
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
assert(vte_size == wordSize, "else adjust times_vte_scale");
@ -1171,7 +1171,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// }
Label search, found_method;
ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
cmp(intf_klass, method_result);
br(Assembler::EQ, found_method);
bind(search);
@ -1179,9 +1179,9 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// the receiver class doesn't implement the interface, and wasn't the
// same as when the caller was compiled.
cbz(method_result, L_no_such_interface);
if (itableOffsetEntry::interface_offset_in_bytes() != 0) {
if (itableOffsetEntry::interface_offset() != 0) {
add(scan_temp, scan_temp, scan_step);
ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
} else {
ldr(method_result, Address(pre(scan_temp, scan_step)));
}
@ -1192,7 +1192,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Got a hit.
if (return_method) {
ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
}
}
@ -1201,10 +1201,9 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
const int base = in_bytes(Klass::vtable_start_offset());
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();
int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset());
if (vtable_index.is_register()) {
lea(method_result, Address(recv_klass,
@ -4311,7 +4310,7 @@ void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod)
void MacroAssembler::load_method_holder(Register holder, Register method) {
ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
void MacroAssembler::load_klass(Register dst, Register src) {
@ -4348,7 +4347,7 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ldr(dst, Address(rmethod, Method::const_offset()));
ldr(dst, Address(dst, ConstMethod::constants_offset()));
ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
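
Note how lookup_virtual_method now adds the two offsets while still typed, in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset()), so the int conversion happens exactly once at the end. That relies on arithmetic operators for the offset type; a minimal sketch of such operators with made-up offset values (HotSpot defines the real ones in utilities/sizes.hpp):

enum class ByteSize : int {};
constexpr ByteSize in_ByteSize(int s)   { return static_cast<ByteSize>(s); }
constexpr int      in_bytes(ByteSize x) { return static_cast<int>(x); }
constexpr ByteSize operator+(ByteSize a, ByteSize b) { return in_ByteSize(in_bytes(a) + in_bytes(b)); }
constexpr ByteSize operator-(ByteSize a, ByteSize b) { return in_ByteSize(in_bytes(a) - in_bytes(b)); }

// Illustrative values, not the real vtable layout:
constexpr ByteSize vtable_start_offset = in_ByteSize(64);
constexpr ByteSize method_offset       = in_ByteSize(0);
static_assert(in_bytes(vtable_start_offset + method_offset) == 64, "one conversion at the end");

int main() { return 0; }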

View File

@ -203,7 +203,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ ldrh(rscratch1, Address(rmethod, Method::intrinsic_id_offset_in_bytes()));
__ ldrh(rscratch1, Address(rmethod, Method::intrinsic_id_offset()));
__ subs(zr, rscratch1, (int) iid);
__ br(Assembler::EQ, L);
if (iid == vmIntrinsics::_linkToVirtual ||

View File

@ -1989,7 +1989,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// reset handle block
__ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
__ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
__ str(zr, Address(r2, JNIHandleBlock::top_offset()));
__ leave();
@ -2389,7 +2389,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into r5
__ mov(r5, r0);
__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset()));
Label noException;
__ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
__ br(Assembler::NE, noException);
@ -2435,7 +2435,7 @@ void SharedRuntime::generate_deopt_blob() {
// when we are done the return to frame 3 will still be on the stack.
// Pop deoptimized frame
__ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ sub(r2, r2, 2 * wordSize);
__ add(sp, sp, r2);
__ ldp(rfp, lr, __ post(sp, 2 * wordSize));
@ -2446,20 +2446,20 @@ void SharedRuntime::generate_deopt_blob() {
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
__ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(r19, r2);
#endif
// Load address of array of frame pcs into r2
__ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset()));
// Trash the old pc
// __ addptr(sp, wordSize); FIXME ????
// Load address of array of frame sizes into r4
__ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset()));
// Load counter into r3
__ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@ -2471,7 +2471,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mov(sender_sp, sp);
__ ldrw(r19, Address(r5,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes()));
caller_adjustment_offset()));
__ sub(sp, sp, r19);
// Push interpreter frames in a loop
@ -2631,7 +2631,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
{ Label L;
__ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset()));
__ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ br(Assembler::EQ, L);
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
@ -2652,7 +2652,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Pop deoptimized frame (int)
__ ldrw(r2, Address(r4,
Deoptimization::UnrollBlock::
size_of_deoptimized_frame_offset_in_bytes()));
size_of_deoptimized_frame_offset()));
__ sub(r2, r2, 2 * wordSize);
__ add(sp, sp, r2);
__ ldp(rfp, lr, __ post(sp, 2 * wordSize));
@ -2665,23 +2665,23 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// trigger a fault. Verify that it does not on non product builds.
__ ldrw(r1, Address(r4,
Deoptimization::UnrollBlock::
total_frame_sizes_offset_in_bytes()));
total_frame_sizes_offset()));
__ bang_stack_size(r1, r2);
#endif
// Load address of array of frame pcs into r2 (address*)
__ ldr(r2, Address(r4,
Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
Deoptimization::UnrollBlock::frame_pcs_offset()));
// Load address of array of frame sizes into r5 (intptr_t*)
__ ldr(r5, Address(r4,
Deoptimization::UnrollBlock::
frame_sizes_offset_in_bytes()));
frame_sizes_offset()));
// Counter
__ ldrw(r3, Address(r4,
Deoptimization::UnrollBlock::
number_of_frames_offset_in_bytes())); // (int)
number_of_frames_offset())); // (int)
// Now adjust the caller's stack to make up for the extra locals but
// record the original sp so that we can save it in the skeletal
@ -2693,7 +2693,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ mov(sender_sp, sp);
__ ldrw(r1, Address(r4,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes())); // (int)
caller_adjustment_offset())); // (int)
__ sub(sp, sp, r1);
// Push interpreter frames in a loop
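
Every UnrollBlock read above follows the same recipe: the accessor returns ByteSize, typically produced at the declaration by a byte_offset_of-style macro, and the assembler call site unwraps it with in_bytes(). A sketch of that declaration pattern; the field subset and the macro are stand-ins for HotSpot's versions in utilities/globalDefinitions.hpp:

#include <cstddef>

enum class ByteSize : int {};
constexpr ByteSize in_ByteSize(int s)   { return static_cast<ByteSize>(s); }
constexpr int      in_bytes(ByteSize x) { return static_cast<int>(x); }

// Stand-in for HotSpot's byte_offset_of(klass, field) macro.
#define byte_offset_of(klass, field) in_ByteSize((int) offsetof(klass, field))

struct UnrollBlockSketch {       // illustrative subset of the real fields
  int _size_of_deoptimized_frame;
  int _number_of_frames;
  int _unpack_kind;
  // Accessor returns ByteSize, so every call site is already warning-free.
  static ByteSize unpack_kind_offset() {
    return byte_offset_of(UnrollBlockSketch, _unpack_kind);
  }
};

int main() {
  // Call sites pass in_bytes(...) wherever a raw displacement is required.
  return in_bytes(UnrollBlockSketch::unpack_kind_offset()) == 2 * (int) sizeof(int) ? 0 : 1;
}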

View File

@ -812,7 +812,7 @@ void TemplateInterpreterGenerator::lock_method() {
__ str(rscratch1, Address(rfp, frame::interpreter_frame_extended_sp_offset * wordSize));
__ str(esp, monitor_block_top); // set new monitor block top
// store object
__ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
__ str(r0, Address(esp, BasicObjectLock::obj_offset()));
__ mov(c_rarg1, esp); // object address
__ lock_object(c_rarg1);
}
@ -858,7 +858,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ ldr(rcpool, Address(rmethod, Method::const_offset()));
__ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
__ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
__ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset()));
__ sub(rscratch1, rlocals, rfp);
__ lsr(rscratch1, rscratch1, Interpreter::logStackElementSize); // rscratch1 = rlocals - fp();
// Store relativized rlocals, see frame::interpreter_frame_locals().
@ -1434,7 +1434,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// reset handle block
__ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
__ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
__ str(zr, Address(t, JNIHandleBlock::top_offset()));
// If result is an oop unbox and store it in frame where gc will see it
// and result handler will pick it up
@ -1512,7 +1512,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
(intptr_t)(frame::interpreter_frame_initial_sp_offset *
wordSize - sizeof(BasicObjectLock))));
__ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
__ cbnz(t, unlock);
// Entry already unlocked, need to throw exception

View File

@ -3832,7 +3832,7 @@ void TemplateTable::monitorenter()
__ bind(loop);
// check if current entry is used
// if not used then remember entry in c_rarg1
__ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
__ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset()));
__ cmp(zr, rscratch1);
__ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ);
// check if current entry is for same object
@ -3892,7 +3892,7 @@ void TemplateTable::monitorenter()
__ increment(rbcp);
// store object
__ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset()));
__ lock_object(c_rarg1);
// check to make sure this monitor doesn't cause stack overflow after locking
@ -3931,7 +3931,7 @@ void TemplateTable::monitorexit()
__ bind(loop);
// check if current entry is for same object
__ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset()));
__ cmp(r0, rscratch1);
// if same object then stop searching
__ br(Assembler::EQ, found);
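
The monitorenter scan above is a linear search over the monitor block: remember the first unused slot (obj == null) as a candidate while also checking whether the object is already locked. The same loop in plain C++, as a hedged sketch over an array of entries:

#include <cassert>
#include <cstddef>

struct BasicObjectLock { void* _lock; void* _obj; };   // stand-in layout

// Scan [top, bottom): remember a free slot, detect an existing entry for obj.
BasicObjectLock* scan_monitor_block(BasicObjectLock* top, BasicObjectLock* bottom,
                                    void* obj, bool* found) {
  BasicObjectLock* free_slot = nullptr;
  *found = false;
  for (BasicObjectLock* cur = top; cur != bottom; ++cur) {
    if (cur->_obj == nullptr) free_slot = cur;  // the csel: remember unused entry
    if (cur->_obj == obj)     *found = true;    // same object already locked
  }
  return free_slot;
}

int main() {
  int o1, o2;
  BasicObjectLock block[3] = {{nullptr, &o1}, {nullptr, nullptr}, {nullptr, &o2}};
  bool found = false;
  BasicObjectLock* slot = scan_monitor_block(block, block + 3, &o2, &found);
  assert(found && slot == &block[1]);
  return 0;
}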

View File

@ -195,8 +195,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
assert_different_registers(hdr, obj, disp_hdr, tmp2);
assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
const ByteSize obj_offset = BasicObjectLock::obj_offset();
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
// save object being locked into the BasicObjectLock
@ -266,8 +266,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
assert_different_registers(hdr, obj, disp_hdr, Rtemp);
Register tmp2 = Rtemp;
assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
const ByteSize obj_offset = BasicObjectLock::obj_offset();
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
Label done;

View File

@ -270,8 +270,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register cache = result;
// load pointer for resolved_references[] objArray
ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
ldr(cache, Address(result, ConstantPool::cache_offset()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset()));
resolve_oop_handle(cache);
// Add in the index
// convert from field index to resolved_references() index and from
@ -285,7 +285,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register Rcpool, Register Rindex, Register Rklass) {
add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset())); // Rklass = cpool->_resolved_klasses
add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}
@ -755,7 +755,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// address of first monitor
sub(Rmonitor, FP, - frame::interpreter_frame_monitor_block_bottom_offset * wordSize + (int)sizeof(BasicObjectLock));
ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset()));
cbnz(Robj, unlock);
pop(state);
@ -826,7 +826,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// points to word before bottom of monitor block
cmp(Rcur, Rbottom); // check if there are no monitors
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
// prefetch monitor's object
b(no_unlock, eq);
@ -836,7 +836,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
add(Rcur, Rcur, entry_size); // otherwise advance to next entry
cmp(Rcur, Rbottom); // check if bottom reached
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
// prefetch monitor's object
b(loop, ne); // if not at bottom then check this entry
}
@ -894,8 +894,8 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
Label already_locked, slow_case;
@ -1011,8 +1011,8 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, Rtemp);
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
const Register Rzero = zero_register(Rtemp);

View File

@ -82,8 +82,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Helpers for runtime call arguments/results
void get_const(Register reg) { ldr(reg, Address(Rmethod, Method::const_offset())); }
void get_constant_pool(Register reg) { get_const(reg); ldr(reg, Address(reg, ConstMethod::constants_offset())); }
void get_constant_pool_cache(Register reg) { get_constant_pool(reg); ldr(reg, Address(reg, ConstantPool::cache_offset_in_bytes())); }
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); ldr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes())); }
void get_constant_pool_cache(Register reg) { get_constant_pool(reg); ldr(reg, Address(reg, ConstantPool::cache_offset())); }
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); ldr(tags, Address(cpool, ConstantPool::tags_offset())); }
// Sets reg. Blows Rtemp.
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);

View File

@ -94,7 +94,7 @@ void AddressLiteral::set_rspec(relocInfo::relocType rtype) {
void MacroAssembler::lookup_virtual_method(Register recv_klass,
Register vtable_index,
Register method_result) {
const int base_offset = in_bytes(Klass::vtable_start_offset()) + vtableEntry::method_offset_in_bytes();
const ByteSize base_offset = Klass::vtable_start_offset() + vtableEntry::method_offset();
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
add(recv_klass, recv_klass, AsmOperand(vtable_index, lsl, LogBytesPerWord));
ldr(method_result, Address(recv_klass, base_offset));
@ -1380,7 +1380,7 @@ void MacroAssembler::lookup_interface_method(Register Rklass,
assert_different_registers(Rklass, Rintf, Rscan, Rtmp);
const int entry_size = itableOffsetEntry::size() * HeapWordSize;
assert(itableOffsetEntry::interface_offset_in_bytes() == 0, "not added for convenience");
assert(itableOffsetEntry::interface_offset() == 0, "not added for convenience");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
const int base = in_bytes(Klass::vtable_start_offset());
@ -1404,15 +1404,15 @@ void MacroAssembler::lookup_interface_method(Register Rklass,
if (method_result != noreg) {
// Interface found at previous position of Rscan, now load the method
ldr_s32(Rtmp, Address(Rscan, itableOffsetEntry::offset_offset_in_bytes() - entry_size));
ldr_s32(Rtmp, Address(Rscan, in_bytes(itableOffsetEntry::offset_offset()) - entry_size));
if (itable_index.is_register()) {
add(Rtmp, Rtmp, Rklass); // Add offset to Klass*
assert(itableMethodEntry::size() * HeapWordSize == wordSize, "adjust the scaling in the code below");
assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust the offset in the code below");
assert(itableMethodEntry::method_offset() == 0, "adjust the offset in the code below");
ldr(method_result, Address::indexed_ptr(Rtmp, itable_index.as_register()));
} else {
int method_offset = itableMethodEntry::size() * HeapWordSize * itable_index.as_constant() +
itableMethodEntry::method_offset_in_bytes();
in_bytes(itableMethodEntry::method_offset());
add_slow(method_result, Rklass, method_offset);
ldr(method_result, Address(method_result, Rtmp));
}
@ -1643,7 +1643,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp)
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ldr(tmp, Address(method, Method::const_offset()));
ldr(tmp, Address(tmp, ConstMethod::constants_offset()));
ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset_in_bytes()));
ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset()));
ldr(mirror, Address(tmp, mirror_offset));
resolve_oop_handle(mirror);
}

View File

@ -218,7 +218,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
if (VerifyMethodHandles) {
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ ldrh(rdi_temp, Address(rbx_method, Method::intrinsic_id_offset_in_bytes()));
__ ldrh(rdi_temp, Address(rbx_method, Method::intrinsic_id_offset()));
__ sub_slow(rdi_temp, rdi_temp, (int) iid);
__ cbz(rdi_temp, L);
if (iid == vmIntrinsics::_linkToVirtual ||

View File

@ -1262,7 +1262,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ldr(LR, Address(Rthread, JavaThread::active_handles_offset()));
__ reset_last_Java_frame(Rtemp); // sets Rtemp to 0 on 32-bit ARM
__ str_32(Rtemp, Address(LR, JNIHandleBlock::top_offset_in_bytes()));
__ str_32(Rtemp, Address(LR, JNIHandleBlock::top_offset()));
if (CheckJNICalls) {
__ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
}
@ -1450,7 +1450,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mov(Rublock, R0);
// Reload Rkind from the UnrollBlock (might have changed)
__ ldr_s32(Rkind, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ ldr_s32(Rkind, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset()));
Label noException;
__ cmp_32(Rkind, Deoptimization::Unpack_exception); // Was exception pending?
__ b(noException, ne);
@ -1484,9 +1484,9 @@ void SharedRuntime::generate_deopt_blob() {
__ add(SP, SP, RegisterSaver::reg_save_size * wordSize);
// Set initial stack state before pushing interpreter frames
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset()));
__ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset()));
__ add(SP, SP, Rtemp);
@ -1502,11 +1502,11 @@ void SharedRuntime::generate_deopt_blob() {
// propagated to the caller of the deoptimized method. Need to get the pc
// from the caller in LR and restore FP.
__ ldr(LR, Address(R2, 0));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ arm_stack_overflow_check(R8, Rtemp);
#endif
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset()));
// Pick up the initial fp we should save
// XXX Note: was ldr(FP, Address(FP));
@ -1518,9 +1518,9 @@ void SharedRuntime::generate_deopt_blob() {
// Hence, ldr(FP, Address(FP)) is probably not correct. For x86,
// Deoptimization::fetch_unroll_info computes the right FP value and
// stores it in Rublock.initial_info. This has been activated for ARM.
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset()));
__ mov(Rsender, SP);
__ sub(SP, SP, Rtemp);
@ -1561,7 +1561,7 @@ void SharedRuntime::generate_deopt_blob() {
#ifdef ASSERT
// Reload Rkind from the UnrollBlock and check that it was not overwritten (Rkind is not callee-saved)
{ Label L;
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset()));
__ cmp_32(Rkind, Rtemp);
__ b(L, eq);
__ stop("Rkind was overwritten");
@ -1671,7 +1671,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
{ Label L;
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::unpack_kind_offset()));
__ cmp_32(Rtemp, Deoptimization::Unpack_uncommon_trap);
__ b(L, eq);
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
@ -1681,9 +1681,9 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Set initial stack state before pushing interpreter frames
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ ldr(R2, Address(Rublock, Deoptimization::UnrollBlock::frame_pcs_offset()));
__ ldr(R3, Address(Rublock, Deoptimization::UnrollBlock::frame_sizes_offset()));
__ add(SP, SP, Rtemp);
@ -1699,16 +1699,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// propagated to the caller of the deoptimized method. Need to get the pc
// from the caller in LR and restore FP.
__ ldr(LR, Address(R2, 0));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ arm_stack_overflow_check(R8, Rtemp);
#endif
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ ldr_s32(R8, Address(Rublock, Deoptimization::UnrollBlock::number_of_frames_offset()));
__ ldr_s32(Rtemp, Address(Rublock, Deoptimization::UnrollBlock::caller_adjustment_offset()));
__ mov(Rsender, SP);
__ sub(SP, SP, Rtemp);
// __ ldr(FP, Address(FP));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ ldr(FP, Address(Rublock, Deoptimization::UnrollBlock::initial_info_offset()));
// Push interpreter frames in a loop
Label loop;

View File

@ -599,7 +599,7 @@ void TemplateInterpreterGenerator::lock_method() {
// add space for a monitor entry
__ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// set new monitor block top
__ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes()));
__ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset()));
// store object
__ mov(R1, Rstack_top); // monitor entry address
__ lock_object(R1);
@ -658,7 +658,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
__ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
__ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
__ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset()));
__ push(Rtemp); // set constant pool cache
__ sub(Rtemp, Rlocals, FP);
__ logical_shift_right(Rtemp, Rtemp, Interpreter::logStackElementSize); // Rtemp = Rlocals - fp();
@ -1054,7 +1054,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Zero handles and last_java_sp
__ reset_last_Java_frame(Rtemp);
__ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
__ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
__ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset()));
if (CheckJNICalls) {
__ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
}

View File

@ -3689,10 +3689,10 @@ void TemplateTable::invokevirtual_helper(Register index,
__ profile_virtual_call(R0_tmp, recv_klass);
// get target Method* & entry point
const int base = in_bytes(Klass::vtable_start_offset());
const ByteSize base = Klass::vtable_start_offset();
assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
__ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
__ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
__ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset()));
__ jump_from_interpreted(Rmethod);
}
@ -3801,7 +3801,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// Get declaring interface class from method
__ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
__ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
__ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
__ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset()));
// Get itable index from method
__ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
@ -4290,7 +4290,7 @@ void TemplateTable::monitorenter() {
// points to word before bottom of monitor block
__ cmp(Rcur, Rbottom); // check if there are no monitors
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
// prefetch monitor's object for the first iteration
__ b(allocate_monitor, eq); // there are no monitors, skip searching
@ -4304,7 +4304,7 @@ void TemplateTable::monitorenter() {
__ add(Rcur, Rcur, entry_size); // otherwise advance to next entry
__ cmp(Rcur, Rbottom); // check if bottom reached
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
// prefetch monitor's object for the next iteration
__ b(loop, ne); // if not at bottom then check this entry
__ bind(exit);
@ -4357,7 +4357,7 @@ void TemplateTable::monitorenter() {
// The object has already been popped from the stack, so the expression stack looks correct.
__ add(Rbcp, Rbcp, 1);
__ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes())); // store object
__ str(Robj, Address(Rentry, BasicObjectLock::obj_offset())); // store object
__ lock_object(Rentry);
// check to make sure this monitor doesn't cause stack overflow after locking
@ -4394,7 +4394,7 @@ void TemplateTable::monitorexit() {
// points to word before bottom of monitor block
__ cmp(Rcur, Rbottom); // check if bottom reached
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
// prefetch monitor's object for the first iteration
__ b(throw_exception, eq); // throw exception if there are no monitors
@ -4404,7 +4404,7 @@ void TemplateTable::monitorexit() {
__ b(found, eq); // if same object then stop searching
__ add(Rcur, Rcur, entry_size); // otherwise advance to next entry
__ cmp(Rcur, Rbottom); // check if bottom reached
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
__ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset()), ne);
__ b (loop, ne); // if not at bottom then check this entry
}

View File

@ -89,7 +89,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
start_pc = __ pc();
{ // lookup virtual method
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;
int method_offset = in_bytes(vtableEntry::method_offset()) + entry_offset;
assert ((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
int offset_mask = 0xfff;

View File

@ -1748,6 +1748,7 @@ class Assembler : public AbstractAssembler {
// 8 bytes
inline void ldx( Register d, Register s1, Register s2);
inline void ld( Register d, int si16, Register s1);
inline void ld( Register d, ByteSize si16, Register s1);
inline void ldu( Register d, int si16, Register s1);
// 8 bytes reversed
@ -2486,6 +2487,7 @@ class Assembler : public AbstractAssembler {
inline void lbz( Register d, int si16);
inline void ldx( Register d, Register s2);
inline void ld( Register d, int si16);
inline void ld( Register d, ByteSize si16);
inline void ldbrx(Register d, Register s2);
inline void stwx( Register d, Register s2);
inline void stw( Register d, int si16);

View File

@ -361,6 +361,7 @@ inline void Assembler::lbz( Register d, int si16, Register s1) { emit_int32(
inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::ld( Register d, ByteSize si16, Register s1) { assert(in_bytes(si16) < 0x7fff, "overflow"); ld(d, in_bytes(si16), s1); }
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
inline void Assembler::ldbrx( Register d, Register s1, Register s2) { emit_int32(LDBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
@ -1079,6 +1080,7 @@ inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE
inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
inline void Assembler::ld( Register d, ByteSize si16) { assert(in_bytes(si16) < 0x7fff, "overflow"); ld(d, in_bytes(si16)); }
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::ldbrx(Register d, Register s2) { emit_int32( LDBRX_OPCODE| rt(d) | rb(s2));}
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
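
The two new ld overloads are conveniences: accept a ByteSize displacement, assert it fits, and forward to the existing int version, letting call sites pass typed offsets straight through. The range check matters because the DS-form ld instruction encodes only a short signed displacement (about +/-32 KiB, a multiple of 4). A stand-alone sketch of the delegation pattern:

#include <cassert>

enum class ByteSize : int {};
constexpr int in_bytes(ByteSize x) { return static_cast<int>(x); }

struct AssemblerSketch {
  int last_disp = 0;   // stand-in for emitting the instruction
  void ld(int d, int si16, int s1) { (void)d; (void)s1; last_disp = si16; }
  void ld(int d, ByteSize si16, int s1) {
    assert(in_bytes(si16) < 0x7fff && "overflow");  // must fit the DS field
    ld(d, in_bytes(si16), s1);                      // forward to the int version
  }
};

int main() {
  AssemblerSketch a;
  a.ld(/*d=*/3, static_cast<ByteSize>(16), /*s1=*/1);  // typed-offset call site
  return a.last_disp == 16 ? 0 : 1;
}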

View File

@ -105,7 +105,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
verify_oop(Roop, FILE_AND_LINE);
// Save object being locked into the BasicObjectLock...
std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
std(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(Rscratch, Roop);
@ -167,7 +167,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
beq(CCR0, done);
// Load object.
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
verify_oop(Roop, FILE_AND_LINE);
// Check if it is still a lightweight lock; this is true if we see

View File

@ -505,8 +505,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
// word index to byte offset. Since this is a java object, it can be compressed.
sldi(index, index, LogBytesPerHeapOop);
// Load pointer for resolved_references[] objArray.
ld(result, ConstantPool::cache_offset_in_bytes(), result);
ld(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
ld(result, ConstantPool::cache_offset(), result);
ld(result, ConstantPoolCache::resolved_references_offset(), result);
resolve_oop_handle(result, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
#ifdef ASSERT
Label index_ok;
@ -536,7 +536,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, R
lhz(Roffset, sizeof(ConstantPool) + 2, Roffset); // Roffset = resolved_klass_index
#endif
ld(Rklass, ConstantPool::resolved_klasses_offset_in_bytes(), Rcpool); // Rklass = Rcpool->_resolved_klasses
ld(Rklass, ConstantPool::resolved_klasses_offset(), Rcpool); // Rklass = Rcpool->_resolved_klasses
sldi(Roffset, Roffset, LogBytesPerWord);
addi(Roffset, Roffset, Array<Klass*>::base_offset_in_bytes());
@ -646,12 +646,12 @@ void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
get_constant_pool(Rdst);
ld(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
ld(Rdst, ConstantPool::cache_offset(), Rdst);
}
void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
get_constant_pool(Rcpool);
ld(Rtags, ConstantPool::tags_offset_in_bytes(), Rcpool);
ld(Rtags, ConstantPool::tags_offset(), Rcpool);
}
// Unlock if synchronized method.
@ -701,7 +701,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
addi(Rmonitor_base, Rmonitor_base,
-(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
ld(R0, BasicObjectLock::obj_offset_in_bytes(), Rmonitor_base);
ld(R0, BasicObjectLock::obj_offset(), Rmonitor_base);
cmpdi(CCR0, R0, 0);
bne(CCR0, Lunlock);
@ -740,7 +740,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
ble(CCR0, Lno_unlock);
addi(Rcurrent_obj_addr, Rmonitor_base,
BasicObjectLock::obj_offset_in_bytes() - frame::interpreter_frame_monitor_size_in_bytes());
in_bytes(BasicObjectLock::obj_offset()) - frame::interpreter_frame_monitor_size_in_bytes());
// Check if any monitor is on stack, bail out if not
srdi(Riterations, Riterations, exact_log2(delta));
mtctr(Riterations);
@ -775,7 +775,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
// Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
// Unlock does not block, so don't have to worry about the frame.
Register Rmonitor_addr = R11_scratch1;
addi(Rmonitor_addr, Rcurrent_obj_addr, -BasicObjectLock::obj_offset_in_bytes() + delta);
addi(Rmonitor_addr, Rcurrent_obj_addr, -in_bytes(BasicObjectLock::obj_offset()) + delta);
unlock_object(Rmonitor_addr);
if (install_monitor_exception) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
@ -964,10 +964,12 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
ori(displaced_header, displaced_header, markWord::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
// Initialize the box (Must happen before we update the object mark!).
std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
std(displaced_header, mark_offset, monitor);
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
@ -1008,8 +1010,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// If condition is true we are done and hence we can store 0 in the displaced
// header indicating it is a recursive lock.
bne(CCR0, slow_case);
std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
std(R0/*==0!*/, mark_offset, monitor);
b(count_locking);
// } else {
@ -1065,8 +1066,8 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
assert_different_registers(object, displaced_header, object_mark_addr, current_header);
// Test first if we are in the fast recursive case.
ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
ld(displaced_header, in_bytes(BasicObjectLock::lock_offset()) +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// If the displaced header is zero, we have a recursive unlock.
cmpdi(CCR0, displaced_header, 0);
@ -1079,7 +1080,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
// If we still have a lightweight lock, unlock the object and be done.
// The object address from the monitor is in object.
ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// We have the displaced header in displaced_header. If the lock is still
@ -1113,7 +1114,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
align(32, 12);
bind(free_slot);
li(R0, 0);
std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
dec_held_monitor_count(current_header /*tmp*/);
bind(done);
}
@ -1861,7 +1862,7 @@ void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1,
// type because we're right after it and we don't know its
// length.
lbz(tmp1, 0, R14_bcp);
lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
lbz(tmp2, in_bytes(Method::intrinsic_id_offset()), R19_method);
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

View File

@ -1789,7 +1789,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Compute start of first itableOffsetEntry (which is at the end of the vtable).
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int itentry_off = in_bytes(itableMethodEntry::method_offset());
int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
int scan_step = itableOffsetEntry::size() * wordSize;
int log_vte_size= exact_log2(vtableEntry::size_in_bytes());
@ -1826,7 +1826,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
for (int peel = 1; peel >= 0; peel--) {
// %%%% Could load both offset and interface in one ldx, if they were
// in the opposite order. This would save a load.
ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
ld(temp2, in_bytes(itableOffsetEntry::interface_offset()), scan_temp);
// Check that this entry is non-null. A null entry means that
// the receiver class doesn't implement the interface, and wasn't the
@ -1853,7 +1853,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Got a hit.
if (return_method) {
int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
int ito_offset = in_bytes(itableOffsetEntry::offset_offset());
lwz(scan_temp, ito_offset, scan_temp);
ldx(method_result, scan_temp, method_result);
}
@ -1866,7 +1866,7 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
const int base = in_bytes(Klass::vtable_start_offset());
const ByteSize base = Klass::vtable_start_offset();
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
if (vtable_index.is_register()) {
@ -1875,7 +1875,7 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
} else {
addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
}
ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
ld(R19_method, in_bytes(base + vtableEntry::method_offset()), recv_klass);
}
/////////////////////////////////////////// subtype checking ////////////////////////////////////////////
@ -2353,7 +2353,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
// Set rtm_state to "no rtm" in MDO.
// Not using a metadata relocation. Method and Class Loader are kept alive anyway.
// (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.)
load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
load_const(R0, (address)method_data + in_bytes(MethodData::rtm_state_offset()), tmpReg);
atomic_ori_int(R0, tmpReg, NoRTM);
}
b(L_done);
@ -2373,7 +2373,7 @@ void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
if (method_data != nullptr) {
// Set rtm_state to "always rtm" in MDO.
// Not using a metadata relocation. See above.
load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
load_const(R0, (address)method_data + in_bytes(MethodData::rtm_state_offset()), tmpReg);
atomic_ori_int(R0, tmpReg, UseRTM);
}
bind(L_done);
@ -2546,7 +2546,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
assert(UseRTMLocking, "why call this otherwise?");
Label L_rtm_retry, L_decrement_retry, L_on_abort;
// Clean monitor_value bit to get valid pointer.
int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value;
int owner_offset = in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value;
// Store non-null, using boxReg instead of (intptr_t)markWord::unused_mark().
std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
@ -2719,7 +2719,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
#endif // INCLUDE_RTM_OPT
// Try to CAS m->owner from null to current thread.
addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markWord::monitor_value);
addi(temp, displaced_header, in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value);
cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header,
/*compare_value=*/(intptr_t)0,
@ -2738,9 +2738,9 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Current thread already owns the lock. Just increment recursions.
Register recursions = displaced_header;
ld(recursions, ObjectMonitor::recursions_offset_in_bytes()-ObjectMonitor::owner_offset_in_bytes(), temp);
ld(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
addi(recursions, recursions, 1);
std(recursions, ObjectMonitor::recursions_offset_in_bytes()-ObjectMonitor::owner_offset_in_bytes(), temp);
std(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
#if INCLUDE_RTM_OPT
} // use_rtm()
@ -2817,7 +2817,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
bind(object_has_monitor);
STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
addi(current_header, current_header, -(int)markWord::monitor_value); // monitor
ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
ld(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
// It's inflated.
#if INCLUDE_RTM_OPT
@ -2832,24 +2832,24 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
}
#endif
ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
ld(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
cmpd(flag, temp, R16_thread);
bne(flag, failure);
addic_(displaced_header, displaced_header, -1);
blt(CCR0, notRecursive); // Not recursive if negative after decrement.
std(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
std(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
b(success); // flag is already EQ here.
bind(notRecursive);
ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header);
ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
ld(temp, in_bytes(ObjectMonitor::EntryList_offset()), current_header);
ld(displaced_header, in_bytes(ObjectMonitor::cxq_offset()), current_header);
orr(temp, temp, displaced_header); // Will be 0 if both are 0.
cmpdi(flag, temp, 0);
bne(flag, failure);
release();
std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
std(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
// flag == EQ indicates success, decrement held monitor count
// flag == NE indicates failure
@ -3085,7 +3085,7 @@ void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Registe
void MacroAssembler::load_method_holder(Register holder, Register method) {
ld(holder, in_bytes(Method::const_offset()), method);
ld(holder, in_bytes(ConstMethod::constants_offset()), holder);
ld(holder, ConstantPool::pool_holder_offset_in_bytes(), holder);
ld(holder, ConstantPool::pool_holder_offset(), holder);
}
// Clear Array
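
In compiler_fast_lock_object above, temp is left pointing at the monitor's _owner field after the CAS, so _recursions is then read at displacement recursions_offset() - owner_offset(); the ByteSize subtraction expresses exactly that field-to-field distance. A portable sketch with a stand-in layout:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct MonitorSketch { void* _owner; intptr_t _recursions; };  // illustrative

int main() {
  MonitorSketch m{nullptr, 7};
  char* owner_addr = reinterpret_cast<char*>(&m._owner);       // what temp holds
  ptrdiff_t disp = (ptrdiff_t) offsetof(MonitorSketch, _recursions)
                 - (ptrdiff_t) offsetof(MonitorSketch, _owner);
  intptr_t* recursions = reinterpret_cast<intptr_t*>(owner_addr + disp);
  assert(*recursions == 7);
  return 0;
}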

View File

@ -247,7 +247,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ load_sized_value(R30_tmp1, Method::intrinsic_id_offset_in_bytes(), R19_method,
__ load_sized_value(R30_tmp1, in_bytes(Method::intrinsic_id_offset()), R19_method,
sizeof(u2), /*is_signed*/ false);
__ cmpwi(CCR1, R30_tmp1, (int) iid);
__ beq(CCR1, L);

View File

@@ -3603,7 +3603,7 @@ encode %{
__ load_klass(R11_scratch1, R3);
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
__ li(R19_method, v_off);
__ ldx(R19_method/*method*/, R19_method/*method offset*/, R11_scratch1/*class*/);
// NOTE: for vtable dispatches, the vtable entry will never be
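
In the vtable dispatch above, the displacement folds three quantities into one constant: the start of the vtable inside the Klass, the scaled index, and the method slot within an entry. A sketch with assumed constants follows; the real values come from Klass::vtable_start_offset(), vtableEntry::size_in_bytes(), and vtableEntry::method_offset().

```cpp
#include <cstdio>

// Assumed stand-ins for the real queries; only the shape of the
// computation is the point here.
const int vtable_start = 64;  // in_bytes(Klass::vtable_start_offset()), assumed
const int entry_size   = 8;   // vtableEntry::size_in_bytes(), assumed
const int method_slot  = 0;   // in_bytes(vtableEntry::method_offset()), assumed

// Byte offset of the Method* for a given vtable index, measured from the
// Klass* -- the v_off the stub adds to the klass register before the load.
int vtable_method_offset(int vtable_index) {
  int entry_offset = vtable_start + vtable_index * entry_size;
  return entry_offset + method_slot;
}

int main() {
  std::printf("v_off for index 3 = %d\n", vtable_method_offset(3));
  return 0;
}
```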

View File

@@ -2744,7 +2744,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ ld(r_temp_1, thread_(active_handles));
// TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
__ li(r_temp_2, 0);
__ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
__ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
// Check for pending exceptions.
@@ -2856,13 +2856,13 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
// _number_of_frames is of type int (deoptimization.hpp)
__ lwa(number_of_frames_reg,
Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
in_bytes(Deoptimization::UnrollBlock::number_of_frames_offset()),
unroll_block_reg);
__ ld(pcs_reg,
Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
in_bytes(Deoptimization::UnrollBlock::frame_pcs_offset()),
unroll_block_reg);
__ ld(frame_sizes_reg,
Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
in_bytes(Deoptimization::UnrollBlock::frame_sizes_offset()),
unroll_block_reg);
// stack: (caller_of_deoptee, ...).
@@ -2888,7 +2888,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
// into a valid PARENT_IJAVA_FRAME_ABI.
__ lwa(R11_scratch1,
Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
in_bytes(Deoptimization::UnrollBlock::caller_adjustment_offset()),
unroll_block_reg);
__ neg(R11_scratch1, R11_scratch1);
@@ -3069,7 +3069,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
// reload the exec mode from the UnrollBlock (it might have changed)
__ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
__ lwz(exec_mode_reg, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
// In excp_deopt_mode, restore and clear exception oop which we
// stored in the thread during exception entry above. The exception
// oop will be the return value of this stub.
@@ -3096,7 +3096,7 @@ void SharedRuntime::generate_deopt_blob() {
// If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
// and the frame is effectively not resized.
Register caller_sp = R23_tmp3;
__ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset_in_bytes(), unroll_block_reg);
__ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
__ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
// Loop through the `UnrollBlock' info and create interpreter frames.
@@ -3229,7 +3229,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// stack: (caller_of_deoptee, ...).
#ifdef ASSERT
__ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
__ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
#endif
@@ -3238,7 +3238,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// If not compiled the loaded value is equal to the current SP (see frame::initial_deoptimization_info())
// and the frame is effectively not resized.
Register caller_sp = R23_tmp3;
__ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset_in_bytes(), unroll_block_reg);
__ ld_ptr(caller_sp, Deoptimization::UnrollBlock::initial_info_offset(), unroll_block_reg);
__ resize_frame_absolute(caller_sp, R24_tmp4, R25_tmp5);
// Allocate new interpreter frame(s) and possibly a c2i adapter
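
push_skeleton_frames reads UnrollBlock fields of different widths: number_of_frames is an int (hence the 32-bit lwa), while frame_pcs and frame_sizes are arrays reached through pointer fields (hence the 64-bit ld). A rough C++ rendering, with the struct layout assumed purely for illustration:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for Deoptimization::UnrollBlock; the field names
// follow the accessors above, the layout itself is an assumption.
struct UnrollBlockLike {
  int        _number_of_frames;   // read with lwa (32-bit, sign-extended)
  intptr_t*  _frame_sizes;        // read with ld (64-bit pointer)
  uint8_t**  _frame_pcs;          // read with ld (64-bit pointer)
};

int main() {
  intptr_t sizes[2] = {96, 112};
  uint8_t* pcs[2]   = {nullptr, nullptr};
  UnrollBlockLike ub = {2, sizes, pcs};

  // Walk the skeletal frames the way the stub's loop does.
  for (int i = 0; i < ub._number_of_frames; i++) {
    std::printf("frame %d: size=%ld pc=%p\n", i, (long)ub._frame_sizes[i], (void*)ub._frame_pcs[i]);
  }
  return 0;
}
```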

View File

@@ -869,7 +869,7 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
// Got the oop to lock => execute!
__ add_monitor_to_stack(true, Rscratch1, R0);
__ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
__ std(Robj_to_lock, in_bytes(BasicObjectLock::obj_offset()), R26_monitor);
__ lock_object(R26_monitor, Robj_to_lock);
}
@@ -1001,7 +1001,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
__ add(R18_locals, R15_esp, Rsize_of_parameters);
__ ld(Rconst_pool, in_bytes(ConstMethod::constants_offset()), Rconst_method);
__ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), Rconst_pool);
__ ld(R27_constPoolCache, ConstantPool::cache_offset(), Rconst_pool);
// Set method data pointer.
if (ProfileInterpreter) {
@@ -1026,7 +1026,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
__ std(R12_scratch2, _abi0(lr), R1_SP);
// Get mirror and store it in the frame as GC root for this Method*.
__ ld(Rmirror, ConstantPool::pool_holder_offset_in_bytes(), Rconst_pool);
__ ld(Rmirror, ConstantPool::pool_holder_offset(), Rconst_pool);
__ ld(Rmirror, in_bytes(Klass::java_mirror_offset()), Rmirror);
__ resolve_oop_handle(Rmirror, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS);
@@ -1541,7 +1541,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ld(active_handles, thread_(active_handles));
// TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
__ li(R0, 0);
__ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
__ stw(R0, in_bytes(JNIHandleBlock::top_offset()), active_handles);
Label exception_return_sync_check_already_unlocked;
__ ld(R0/*pending_exception*/, thread_(pending_exception));

View File

@@ -1736,7 +1736,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ beq(CCR0, Lforward);
// Has the nmethod been invalidated already?
__ lbz(R0, nmethod::state_offset(), R3_RET);
__ lbz(R0, in_bytes(nmethod::state_offset()), R3_RET);
__ cmpwi(CCR0, R0, nmethod::in_use);
__ bne(CCR0, Lforward);
@@ -3459,11 +3459,11 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
const Register Rtarget_method = Rindex;
// Get target method & entry point.
const int base = in_bytes(Klass::vtable_start_offset());
const ByteSize base = Klass::vtable_start_offset();
// Calc vtable addr scale the vtable index by 8.
__ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
// Load target.
__ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
__ addi(Rrecv_klass, Rrecv_klass, in_bytes(base + vtableEntry::method_offset()));
__ ldx(Rtarget_method, Rindex, Rrecv_klass);
// Argument and return type profiling.
__ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
@@ -4135,10 +4135,10 @@ void TemplateTable::monitorenter() {
Register Rlimit = Rcurrent_monitor;
// Set up search loop - start with topmost monitor.
__ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
__ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
__ ld(Rlimit, 0, R1_SP);
__ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
__ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - in_bytes(BasicObjectLock::obj_offset()))); // Monitor base
// Check if any slot is present => short cut to allocation if not.
__ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
@@ -4169,7 +4169,7 @@ void TemplateTable::monitorenter() {
// Check if we found a free slot.
__ bind(Lexit);
__ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
__ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - in_bytes(BasicObjectLock::obj_offset()));
__ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
__ b(Lfound);
@@ -4178,7 +4178,7 @@ void TemplateTable::monitorenter() {
__ bind(Lallocate_new);
__ add_monitor_to_stack(false, Rscratch1, Rscratch2);
__ mr(Rcurrent_monitor, R26_monitor);
__ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
__ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
// ------------------------------------------------------------------------------
// We now have a slot to lock.
@@ -4225,8 +4225,8 @@ void TemplateTable::monitorexit() {
Label Lloop;
// Start with topmost monitor.
__ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
__ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
__ addi(Rcurrent_obj_addr, R26_monitor, in_bytes(BasicObjectLock::obj_offset()));
__ addi(Rlimit, Rlimit, in_bytes(BasicObjectLock::obj_offset()));
__ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
__ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
@@ -4253,7 +4253,7 @@ void TemplateTable::monitorexit() {
__ align(32, 12);
__ bind(Lfound);
__ addi(Rcurrent_monitor, Rcurrent_obj_addr,
-(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
-(frame::interpreter_frame_monitor_size() * wordSize) - in_bytes(BasicObjectLock::obj_offset()));
__ unlock_object(Rcurrent_monitor);
}

View File

@@ -102,7 +102,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index*vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
__ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);

View File

@@ -58,7 +58,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
verify_oop(obj);
// save object being locked into the BasicObjectLock
sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
null_check_offset = offset();
@@ -130,7 +130,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
}
// load object
ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {

View File

@@ -87,7 +87,7 @@ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
assert(t != noreg, "need tmp register");
// Fix owner to be the current thread.
__ sd(xthread, Address(mon, ObjectMonitor::owner_offset_in_bytes()));
__ sd(xthread, Address(mon, ObjectMonitor::owner_offset()));
// Pop owner object from lock-stack.
__ lwu(t, Address(xthread, JavaThread::lock_stack_top_offset()));

View File

@@ -300,8 +300,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
get_constant_pool(result);
// Load pointer for resolved_references[] objArray
ld(result, Address(result, ConstantPool::cache_offset_in_bytes()));
ld(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
ld(result, Address(result, ConstantPool::cache_offset()));
ld(result, Address(result, ConstantPoolCache::resolved_references_offset()));
resolve_oop_handle(result, tmp, t1);
// Add in the index
addi(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
@@ -313,7 +313,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register cpool, Register index, Register klass, Register temp) {
shadd(temp, index, cpool, temp, LogBytesPerWord);
lhu(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
ld(klass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // klass = cpool->_resolved_klasses
ld(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
shadd(klass, temp, klass, temp, LogBytesPerWord);
ld(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
@@ -658,7 +658,7 @@ void InterpreterMacroAssembler::remove_activation(
// register for unlock_object to pass to VM directly
la(c_rarg1, monitor); // address of first monitor
ld(x10, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
ld(x10, Address(c_rarg1, BasicObjectLock::obj_offset()));
bnez(x10, unlock);
pop(state);
@@ -737,7 +737,7 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop);
// check if current entry is used
add(t0, c_rarg1, BasicObjectLock::obj_offset_in_bytes());
add(t0, c_rarg1, in_bytes(BasicObjectLock::obj_offset()));
ld(t0, Address(t0, 0));
bnez(t0, exception);
@@ -811,8 +811,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
const Register tmp = c_rarg2;
const Register obj_reg = c_rarg3; // Will contain the oop
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
@@ -914,14 +914,14 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
if (LockingMode != LM_LIGHTWEIGHT) {
// Convert from BasicObjectLock structure to object and BasicLock
// structure Store the BasicLock address into x10
la(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
la(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
}
// Load oop into obj_reg(c_rarg3)
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
ld(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
// Free entry
sd(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
sd(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
if (LockingMode == LM_LIGHTWEIGHT) {
Label slow_case;
@@ -960,7 +960,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
}
// Call the runtime routine for slow case.
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
j(done);
@@ -1908,7 +1908,7 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
mv(tmp, (u1)Bytecodes::_invokehandle);
beq(t0, tmp, do_profile);
get_method(tmp);
lhu(t0, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
lhu(t0, Address(tmp, Method::intrinsic_id_offset()));
mv(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
bne(t0, t1, profile_continue);
bind(do_profile);
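
The BasicObjectLock offsets used throughout these interpreter hunks name the two slots of a monitor entry: the BasicLock (displaced header word) and the locked oop. A simplified sketch of the free-slot scan that monitorenter performs over such entries; the layout is an assumption for illustration, not HotSpot's exact definition.

```cpp
#include <cstdio>

// Assumed, simplified monitor-entry layout: a BasicObjectLock is a
// BasicLock followed by the locked oop; lock_offset() and obj_offset()
// name those two slots.
struct BasicLockLike       { void* _displaced_header; };
struct BasicObjectLockLike {
  BasicLockLike _lock;
  void*         _obj;   // null marks a free slot
};

// Scan the interpreter's monitor block for a free slot, advancing one
// entry at a time -- the same walk monitorenter performs above.
BasicObjectLockLike* find_free_slot(BasicObjectLockLike* top, BasicObjectLockLike* limit) {
  for (BasicObjectLockLike* cur = top; cur < limit; cur++) {
    if (cur->_obj == nullptr) return cur;
  }
  return nullptr;  // no free slot: the caller allocates a new entry
}

int main() {
  int dummy;
  BasicObjectLockLike block[3] = {{{nullptr}, &dummy}, {{nullptr}, nullptr}, {{nullptr}, &dummy}};
  BasicObjectLockLike* slot = find_free_slot(block, block + 3);
  std::printf("free slot index: %ld\n", slot ? (long)(slot - block) : -1L);
  return 0;
}
```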

View File

@@ -125,12 +125,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
ld(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
ld(reg, Address(reg, ConstantPool::cache_offset()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
ld(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
ld(tags, Address(cpool, ConstantPool::tags_offset()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);

View File

@@ -2085,7 +2085,7 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
ld(dst, Address(xmethod, Method::const_offset()));
ld(dst, Address(dst, ConstMethod::constants_offset()));
ld(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
ld(dst, Address(dst, ConstantPool::pool_holder_offset()));
ld(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst, tmp1, tmp2);
}
@@ -2400,7 +2400,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Compute start of first itableOffsetEntry (which is at the end of the vtable).
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int itentry_off = in_bytes(itableMethodEntry::method_offset());
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
assert(vte_size == wordSize, "else adjust times_vte_scale");
@@ -2427,7 +2427,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
Label search, found_method;
ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset_in_bytes()));
ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset()));
beq(intf_klass, method_result, found_method);
bind(search);
// Check that the previous entry is non-null. A null entry means that
@@ -2435,14 +2435,14 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// same as when the caller was compiled.
beqz(method_result, L_no_such_interface, /* is_far */ true);
addi(scan_tmp, scan_tmp, scan_step);
ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset_in_bytes()));
ld(method_result, Address(scan_tmp, itableOffsetEntry::interface_offset()));
bne(intf_klass, method_result, search);
bind(found_method);
// Got a hit.
if (return_method) {
lwu(scan_tmp, Address(scan_tmp, itableOffsetEntry::offset_offset_in_bytes()));
lwu(scan_tmp, Address(scan_tmp, itableOffsetEntry::offset_offset()));
add(method_result, recv_klass, scan_tmp);
ld(method_result, Address(method_result));
}
@@ -2452,10 +2452,10 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
const int base = in_bytes(Klass::vtable_start_offset());
const ByteSize base = Klass::vtable_start_offset();
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();
int vtable_offset_in_bytes = in_bytes(base + vtableEntry::method_offset());
if (vtable_index.is_register()) {
shadd(method_result, vtable_index.as_register(), recv_klass, method_result, LogBytesPerWord);
@@ -3383,7 +3383,7 @@ void MacroAssembler::load_method_holder_cld(Register result, Register method) {
void MacroAssembler::load_method_holder(Register holder, Register method) {
ld(holder, Address(method, Method::const_offset())); // ConstMethod*
ld(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
ld(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
ld(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
// string indexof
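
lookup_interface_method above walks the itable's offset entries: each pairs an interface Klass* with the byte offset of that interface's method array inside the receiver's klass, and a null interface terminates the scan. A sketch follows; the types and layout here are illustrative assumptions.

```cpp
#include <cstdio>

struct KlassLike;  // opaque for this sketch
struct ItableOffsetEntryLike {
  KlassLike* _interface;   // interface_offset() names this slot
  int        _offset;      // offset_offset() names this slot
};

// Returns the byte offset of the matching interface's method array, or -1
// when the null terminator is reached (the "no such interface" case).
int find_itable_offset(ItableOffsetEntryLike* scan, KlassLike* intf) {
  for (; scan->_interface != nullptr; scan++) {
    if (scan->_interface == intf) return scan->_offset;
  }
  return -1;
}

int main() {
  KlassLike* intfA = (KlassLike*)0x1000;  // dummy identities for the demo
  KlassLike* intfB = (KlassLike*)0x2000;
  ItableOffsetEntryLike itable[] = {{intfA, 256}, {intfB, 320}, {nullptr, 0}};
  std::printf("offset for intfB: %d\n", find_itable_offset(itable, intfB));
  return 0;
}
```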

View File

@@ -200,7 +200,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ lhu(t0, Address(xmethod, Method::intrinsic_id_offset_in_bytes()));
__ lhu(t0, Address(xmethod, Method::intrinsic_id_offset()));
__ mv(t1, (int) iid);
__ beq(t0, t1, L);
if (iid == vmIntrinsics::_linkToVirtual ||

View File

@@ -2488,7 +2488,7 @@ encode %{
// otherwise m->owner may contain a thread or a stack address.
//
// Try to CAS m->owner from NULL to current thread.
__ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes() - markWord::monitor_value));
__ add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value));
__ cmpxchg(/*memory address*/tmp, /*expected value*/zr, /*new value*/xthread, Assembler::int64, Assembler::aq,
Assembler::rl, /*result*/flag); // cas succeeds if flag == zr(expected)
@@ -2507,7 +2507,7 @@ encode %{
// Recursive lock case
__ mv(flag, zr);
__ increment(Address(disp_hdr, ObjectMonitor::recursions_offset_in_bytes() - markWord::monitor_value), 1, t0, tmp);
__ increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1, t0, tmp);
__ bind(cont);
// zero flag indicates success
@@ -2583,7 +2583,7 @@ encode %{
if (LockingMode == LM_LIGHTWEIGHT) {
// If the owner is anonymous, we need to fix it -- in an outline stub.
Register tmp2 = disp_hdr;
__ ld(tmp2, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ ld(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
__ test_bit(t0, tmp2, exact_log2(ObjectMonitor::ANONYMOUS_OWNER));
C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
Compile::current()->output()->add_stub(stub);
@@ -2591,24 +2591,24 @@ encode %{
__ bind(stub->continuation());
}
__ ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ ld(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
Label notRecursive;
__ beqz(disp_hdr, notRecursive); // Will be 0 if not recursive.
// Recursive lock
__ addi(disp_hdr, disp_hdr, -1);
__ sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
__ sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
__ mv(flag, zr);
__ j(cont);
__ bind(notRecursive);
__ ld(flag, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
__ ld(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
__ ld(flag, Address(tmp, ObjectMonitor::EntryList_offset()));
__ ld(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset()));
__ orr(flag, flag, disp_hdr); // Will be 0 if both are 0.
__ bnez(flag, cont);
// need a release store here
__ la(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
__ la(tmp, Address(tmp, ObjectMonitor::owner_offset()));
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
__ sd(zr, Address(tmp)); // set unowned
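
These fast-path hunks compute field addresses from a tagged pointer: an inflated mark word holds the ObjectMonitor* with markWord::monitor_value in its low bits, so the stubs fold the untagging into the displacement (owner_offset() minus the tag). A sketch with assumed values for the tag and offset:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed stand-ins: the real values are markWord::monitor_value and
// in_bytes(ObjectMonitor::owner_offset()).
const intptr_t monitor_tag  = 0b10;
const intptr_t owner_offset = 8;

intptr_t owner_field_addr(intptr_t mark_word) {
  // mark_word == (ObjectMonitor* | monitor_tag), so one add with the
  // combined displacement both untags and indexes, as in the code above.
  return mark_word + (owner_offset - monitor_tag);
}

int main() {
  intptr_t monitor = 0x7f0000001000;         // pretend ObjectMonitor*
  intptr_t mark    = monitor | monitor_tag;  // tagged value from the header
  std::printf("owner field at %#lx (expected %#lx)\n",
              (unsigned long)owner_field_addr(mark),
              (unsigned long)(monitor + owner_offset));
  return 0;
}
```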

View File

@@ -1869,7 +1869,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// reset handle block
__ ld(x12, Address(xthread, JavaThread::active_handles_offset()));
__ sd(zr, Address(x12, JNIHandleBlock::top_offset_in_bytes()));
__ sd(zr, Address(x12, JNIHandleBlock::top_offset()));
__ leave();
@@ -2278,7 +2278,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into x15
__ mv(x15, x10);
__ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset()));
Label noException;
__ mv(t0, Deoptimization::Unpack_exception);
__ bne(xcpool, t0, noException); // Was exception pending?
@@ -2321,7 +2321,7 @@ void SharedRuntime::generate_deopt_blob() {
// when we are done the return to frame 3 will still be on the stack.
// Pop deoptimized frame
__ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ lwu(x12, Address(x15, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ sub(x12, x12, 2 * wordSize);
__ add(sp, sp, x12);
__ ld(fp, Address(sp, 0));
@@ -2333,17 +2333,17 @@ void SharedRuntime::generate_deopt_blob() {
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
__ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ lwu(x9, Address(x15, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(x9, x12);
#endif
// Load address of array of frame pcs into x12
__ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ ld(x12, Address(x15, Deoptimization::UnrollBlock::frame_pcs_offset()));
// Load address of array of frame sizes into x14
__ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ ld(x14, Address(x15, Deoptimization::UnrollBlock::frame_sizes_offset()));
// Load counter into x13
__ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ lwu(x13, Address(x15, Deoptimization::UnrollBlock::number_of_frames_offset()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@@ -2355,7 +2355,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mv(sender_sp, sp);
__ lwu(x9, Address(x15,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes()));
caller_adjustment_offset()));
__ sub(sp, sp, x9);
// Push interpreter frames in a loop
@@ -2521,7 +2521,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
{ Label L;
__ lwu(t0, Address(x14, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ lwu(t0, Address(x14, Deoptimization::UnrollBlock::unpack_kind_offset()));
__ mv(t1, Deoptimization::Unpack_uncommon_trap);
__ beq(t0, t1, L);
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
@@ -2541,7 +2541,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Pop deoptimized frame (int)
__ lwu(x12, Address(x14,
Deoptimization::UnrollBlock::
size_of_deoptimized_frame_offset_in_bytes()));
size_of_deoptimized_frame_offset()));
__ sub(x12, x12, 2 * wordSize);
__ add(sp, sp, x12);
__ ld(fp, Address(sp, 0));
@@ -2555,23 +2555,23 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// trigger a fault. Verify that it does not on non product builds.
__ lwu(x11, Address(x14,
Deoptimization::UnrollBlock::
total_frame_sizes_offset_in_bytes()));
total_frame_sizes_offset()));
__ bang_stack_size(x11, x12);
#endif
// Load address of array of frame pcs into x12 (address*)
__ ld(x12, Address(x14,
Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
Deoptimization::UnrollBlock::frame_pcs_offset()));
// Load address of array of frame sizes into x15 (intptr_t*)
__ ld(x15, Address(x14,
Deoptimization::UnrollBlock::
frame_sizes_offset_in_bytes()));
frame_sizes_offset()));
// Counter
__ lwu(x13, Address(x14,
Deoptimization::UnrollBlock::
number_of_frames_offset_in_bytes())); // (int)
number_of_frames_offset())); // (int)
// Now adjust the caller's stack to make up for the extra locals but
// record the original sp so that we can save it in the skeletal
@@ -2582,7 +2582,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ lwu(x11, Address(x14,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes())); // (int)
caller_adjustment_offset())); // (int)
__ mv(sender_sp, sp);
__ sub(sp, sp, x11);

View File

@@ -709,7 +709,7 @@ void TemplateInterpreterGenerator::lock_method() {
__ sd(sp, Address(fp, frame::interpreter_frame_extended_sp_offset * wordSize));
__ sd(esp, monitor_block_top); // set new monitor block top
// store object
__ sd(x10, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
__ sd(x10, Address(esp, BasicObjectLock::obj_offset()));
__ mv(c_rarg1, esp); // object address
__ lock_object(c_rarg1);
}
@@ -758,7 +758,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ ld(xcpool, Address(xmethod, Method::const_offset()));
__ ld(xcpool, Address(xcpool, ConstMethod::constants_offset()));
__ ld(xcpool, Address(xcpool, ConstantPool::cache_offset_in_bytes()));
__ ld(xcpool, Address(xcpool, ConstantPool::cache_offset()));
__ sd(xcpool, Address(sp, 3 * wordSize));
__ sub(t0, xlocals, fp);
__ srai(t0, t0, Interpreter::logStackElementSize); // t0 = xlocals - fp();
@@ -1211,7 +1211,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// reset handle block
__ ld(t, Address(xthread, JavaThread::active_handles_offset()));
__ sd(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
__ sd(zr, Address(t, JNIHandleBlock::top_offset()));
// If result is an oop unbox and store it in frame where gc will see it
// and result handler will pick it up
@@ -1286,7 +1286,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
(intptr_t)(frame::interpreter_frame_initial_sp_offset *
wordSize - sizeof(BasicObjectLock))));
__ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ ld(t, Address(c_rarg1, BasicObjectLock::obj_offset()));
__ bnez(t, unlock);
// Entry already unlocked, need to throw exception

View File

@@ -3758,7 +3758,7 @@ void TemplateTable::monitorenter() {
__ bind(loop);
// check if current entry is used
// if not used then remember entry in c_rarg1
__ ld(t0, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
__ ld(t0, Address(c_rarg3, BasicObjectLock::obj_offset()));
__ bnez(t0, notUsed);
__ mv(c_rarg1, c_rarg3);
__ bind(notUsed);
@@ -3815,7 +3815,7 @@ void TemplateTable::monitorenter() {
__ addi(xbcp, xbcp, 1);
// store object
__ sd(x10, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ sd(x10, Address(c_rarg1, BasicObjectLock::obj_offset()));
__ lock_object(c_rarg1);
// check to make sure this monitor doesn't cause stack overflow after locking
@@ -3852,7 +3852,7 @@ void TemplateTable::monitorexit() {
__ bind(loop);
// check if current entry is for same object
__ ld(t0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ ld(t0, Address(c_rarg1, BasicObjectLock::obj_offset()));
// if same object then stop searching
__ beq(x10, t0, found);
// otherwise advance to next entry

View File

@@ -93,7 +93,7 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
z_lg(hdr, Address(obj, hdr_offset));
// Save object being locked into the BasicObjectLock...
z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(Z_R1_scratch, obj);
@@ -158,7 +158,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
}
// Load object.
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj, FILE_AND_LINE);
if (LockingMode == LM_LIGHTWEIGHT) {

View File

@@ -391,8 +391,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
Register tmp = index; // reuse
z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
// Load pointer for resolved_references[] objArray.
z_lg(result, ConstantPool::cache_offset_in_bytes(), result);
z_lg(result, ConstantPoolCache::resolved_references_offset_in_bytes(), result);
z_lg(result, in_bytes(ConstantPool::cache_offset()), result);
z_lg(result, in_bytes(ConstantPoolCache::resolved_references_offset()), result);
resolve_oop_handle(result); // Load resolved references array itself.
#ifdef ASSERT
NearLabel index_ok;
@@ -412,7 +412,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register cpool, Re
// int resolved_klass_index = extract_low_short_from_int(value);
z_llgh(offset, Address(cpool, offset, sizeof(ConstantPool) + 2)); // offset = resolved_klass_index (s390 is big-endian)
z_sllg(offset, offset, LogBytesPerWord); // Convert 'index' to 'offset'
z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes())); // iklass = cpool->_resolved_klasses
z_lg(iklass, Address(cpool, ConstantPool::resolved_klasses_offset())); // iklass = cpool->_resolved_klasses
z_lg(iklass, Address(iklass, offset, Array<Klass*>::base_offset_in_bytes()));
}
@@ -754,12 +754,12 @@ void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
get_constant_pool(Rdst);
mem2reg_opt(Rdst, Address(Rdst, ConstantPool::cache_offset_in_bytes()));
mem2reg_opt(Rdst, Address(Rdst, ConstantPool::cache_offset()));
}
void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
get_constant_pool(Rcpool);
mem2reg_opt(Rtags, Address(Rcpool, ConstantPool::tags_offset_in_bytes()));
mem2reg_opt(Rtags, Address(Rcpool, ConstantPool::tags_offset()));
}
// Unlock if synchronized method.
@@ -810,7 +810,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
// We use Z_ARG2 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly.
load_address(Z_ARG2, monitor); // Address of first monitor.
z_lg(Z_ARG3, Address(Z_ARG2, BasicObjectLock::obj_offset_in_bytes()));
z_lg(Z_ARG3, Address(Z_ARG2, BasicObjectLock::obj_offset()));
compareU64_and_branch(Z_ARG3, (intptr_t)0L, bcondNotEqual, unlock);
if (throw_monitor_exception) {
@@ -877,7 +877,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
bind(loop);
// Check if current entry is used.
load_and_test_long(Z_R0_scratch, Address(R_current_monitor, BasicObjectLock::obj_offset_in_bytes()));
load_and_test_long(Z_R0_scratch, Address(R_current_monitor, BasicObjectLock::obj_offset()));
z_brne(exception);
add2reg(R_current_monitor, entry_size); // Otherwise advance to next entry.
@@ -1025,7 +1025,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// monitor->lock()->set_displaced_header(displaced_header);
// Initialize the box (Must happen before we update the object mark!).
z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
z_stg(displaced_header, in_bytes(BasicObjectLock::lock_offset()) +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
@@ -1059,7 +1059,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
// header indicating it is a recursive lock and be done.
z_brne(slow_case);
z_release(); // Membar unnecessary on zarch AND because the above csg does a sync before and after.
z_stg(Z_R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
z_stg(Z_R0/*==0!*/, in_bytes(BasicObjectLock::lock_offset()) +
BasicLock::displaced_header_offset_in_bytes(), monitor);
z_bru(done);
@@ -1107,7 +1107,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
const Register displaced_header = Z_ARG4;
const Register current_header = Z_R1;
Address obj_entry(monitor, BasicObjectLock::obj_offset_in_bytes());
Address obj_entry(monitor, BasicObjectLock::obj_offset());
Label done;
if (object == noreg) {
@@ -1128,7 +1128,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
// Test first if we are in the fast recursive case.
MacroAssembler::load_and_test_long(displaced_header,
Address(monitor, BasicObjectLock::lock_offset_in_bytes() +
Address(monitor, in_bytes(BasicObjectLock::lock_offset()) +
BasicLock::displaced_header_offset_in_bytes()));
z_bre(done); // displaced_header == 0 -> goto done
@@ -1810,10 +1810,10 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
get_method(tmp);
// Supplement to 8139891: _intrinsic_id exceeded 1-byte size limit.
if (Method::intrinsic_id_size_in_bytes() == 1) {
z_cli(Method::intrinsic_id_offset_in_bytes(), tmp, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
z_cli(in_bytes(Method::intrinsic_id_offset()), tmp, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
} else {
assert(Method::intrinsic_id_size_in_bytes() == 2, "size error: check Method::_intrinsic_id");
z_lh(tmp, Method::intrinsic_id_offset_in_bytes(), Z_R0, tmp);
z_lh(tmp, in_bytes(Method::intrinsic_id_offset()), Z_R0, tmp);
z_chi(tmp, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
}
z_brne(profile_continue);
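
The branch above exists because _intrinsic_id outgrew one byte (the 8139891 note): depending on Method::intrinsic_id_size_in_bytes(), the interpreter compares either a single byte (z_cli) or a halfword (z_lh plus z_chi) at intrinsic_id_offset(). A rough C++ equivalent of the two-byte path, with the field layout assumed for illustration:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed layout: the id is a u2 today, hence the halfword path below;
// the one-byte z_cli path only applies when the field is a single u1.
struct MethodLike { uint16_t _intrinsic_id; };

// z_lh + z_chi above: load the halfword at the field offset and compare.
bool has_intrinsic_id(const MethodLike* m, int id) {
  uint16_t v;
  std::memcpy(&v, &m->_intrinsic_id, sizeof(v));
  return v == (uint16_t)id;
}

int main() {
  MethodLike m = {300};  // an id value that no longer fits in one byte
  std::printf("matches 300: %d\n", has_intrinsic_id(&m, 300));
  return 0;
}
```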

View File

@@ -2766,10 +2766,8 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
add2reg_with_index(itable_entry_addr,
vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
in_bytes(Klass::vtable_start_offset() + itableOffsetEntry::interface_offset()),
recv_klass, vtable_len);
const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
@@ -2789,8 +2787,8 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Entry found and itable_entry_addr points to it, get offset of vtable for interface.
if (return_method) {
const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
itableOffsetEntry::interface_offset_in_bytes()) -
const int vtable_offset_offset = in_bytes(itableOffsetEntry::offset_offset() -
itableOffsetEntry::interface_offset()) -
itable_offset_search_inc;
// Compute itableMethodEntry and get method and entry point
@@ -2798,7 +2796,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// for computing the entry's offset has a fixed and a dynamic part,
// the latter depending on the matched interface entry and on the case,
// that the itable index has been passed as a register, not a constant value.
int method_offset = itableMethodEntry::method_offset_in_bytes();
int method_offset = in_bytes(itableMethodEntry::method_offset());
// Fixed part (displacement), common operand.
Register itable_offset = method_result; // Dynamic part (index register).
@@ -2838,14 +2836,14 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
Address vtable_entry_addr(recv_klass,
vtable_index.as_constant() * wordSize +
base +
vtableEntry::method_offset_in_bytes());
in_bytes(vtableEntry::method_offset()));
z_lg(method_result, vtable_entry_addr);
} else {
// Shift index properly and load with base + index + disp.
Register vindex = vtable_index.as_register();
Address vtable_entry_addr(recv_klass, vindex,
base + vtableEntry::method_offset_in_bytes());
base + in_bytes(vtableEntry::method_offset()));
z_sllg(vindex, vindex, exact_log2(wordSize));
z_lg(method_result, vtable_entry_addr);
@@ -4211,7 +4209,7 @@ void MacroAssembler::resolve_oop_handle(Register result) {
void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset()));
mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
resolve_oop_handle(mirror);
}
@@ -4219,7 +4217,7 @@ void MacroAssembler::load_mirror_from_const_method(Register mirror, Register con
void MacroAssembler::load_method_holder(Register holder, Register method) {
mem2reg_opt(holder, Address(method, Method::const_offset()));
mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));
mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset()));
}
//---------------------------------------------------------------

View File

@@ -276,10 +276,10 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// Supplement to 8139891: _intrinsic_id exceeded 1-byte size limit.
if (Method::intrinsic_id_size_in_bytes() == 1) {
__ z_cli(Address(Z_method, Method::intrinsic_id_offset_in_bytes()), (int)iid);
__ z_cli(Address(Z_method, Method::intrinsic_id_offset()), (int)iid);
} else {
assert(Method::intrinsic_id_size_in_bytes() == 2, "size error: check Method::_intrinsic_id");
__ z_lh(Z_R0_scratch, Address(Z_method, Method::intrinsic_id_offset_in_bytes()));
__ z_lh(Z_R0_scratch, Address(Z_method, Method::intrinsic_id_offset()));
__ z_chi(Z_R0_scratch, (int)iid);
}
__ z_bre(L);

View File

@@ -2191,7 +2191,7 @@ encode %{
__ load_klass(Z_method, Z_R2);
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
if (Displacement::is_validDisp(v_off) ) {
// Can use load instruction with large offset.

View File

@@ -1986,7 +1986,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Reset handle block.
__ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::active_handles_offset()));
__ clear_mem(Address(Z_R1_scratch, JNIHandleBlock::top_offset_in_bytes()), 4);
__ clear_mem(Address(Z_R1_scratch, JNIHandleBlock::top_offset()), 4);
// Check for pending exceptions.
__ load_and_test_long(Z_R0, Address(Z_thread, Thread::pending_exception_offset()));
@@ -2318,8 +2318,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
Label ic_miss;
const int klass_offset = oopDesc::klass_offset_in_bytes();
const int holder_klass_offset = CompiledICHolder::holder_klass_offset();
const int holder_metadata_offset = CompiledICHolder::holder_metadata_offset();
const int holder_klass_offset = in_bytes(CompiledICHolder::holder_klass_offset());
const int holder_metadata_offset = in_bytes(CompiledICHolder::holder_metadata_offset());
// Out-of-line call to ic_miss handler.
__ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
@@ -2451,11 +2451,11 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
BLOCK_COMMENT("push_skeleton_frames {");
// _number_of_frames is of type int (deoptimization.hpp).
__ z_lgf(number_of_frames_reg,
Address(unroll_block_reg, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
Address(unroll_block_reg, Deoptimization::UnrollBlock::number_of_frames_offset()));
__ z_lg(pcs_reg,
Address(unroll_block_reg, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
Address(unroll_block_reg, Deoptimization::UnrollBlock::frame_pcs_offset()));
__ z_lg(frame_sizes_reg,
Address(unroll_block_reg, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
Address(unroll_block_reg, Deoptimization::UnrollBlock::frame_sizes_offset()));
// stack: (caller_of_deoptee, ...).
@@ -2465,7 +2465,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
// Note: entry and interpreted frames are adjusted, too. But this doesn't harm.
__ z_lgf(Z_R1_scratch,
Address(unroll_block_reg, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
Address(unroll_block_reg, Deoptimization::UnrollBlock::caller_adjustment_offset()));
__ z_lgr(tmp1, Z_SP); // Save the sender sp before extending the frame.
__ resize_frame_sub(Z_R1_scratch, tmp2/*tmp*/);
// The oldest skeletal frame requires a valid sender_sp to make it walkable
@@ -2645,7 +2645,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::restore_result_registers(masm);
// reload the exec mode from the UnrollBlock (it might have changed)
__ z_llgf(exec_mode_reg, Address(unroll_block_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ z_llgf(exec_mode_reg, Address(unroll_block_reg, Deoptimization::UnrollBlock::unpack_kind_offset()));
// In excp_deopt_mode, restore and clear exception oop which we
// stored in the thread during exception entry above. The exception
@@ -2778,7 +2778,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
assert(Immediate::is_uimm8(Deoptimization::Unpack_LIMIT), "Code not fit for larger immediates");
assert(Immediate::is_uimm8(Deoptimization::Unpack_uncommon_trap), "Code not fit for larger immediates");
const int unpack_kind_byte_offset = Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()
const int unpack_kind_byte_offset = in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset())
#ifndef VM_LITTLE_ENDIAN
+ 3
#endif
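
The +3 under #ifndef VM_LITTLE_ENDIAN above is pure endianness arithmetic: unpack_kind is a four-byte int, the stub compares one byte against a small immediate, and the least significant byte of an int sits at offset +0 on little-endian but +3 on big-endian s390. A quick self-contained check:

```cpp
#include <cstdio>
#include <cstring>

// Locate the least significant byte of an int by probing the value 1.
int low_byte_offset_of_int() {
  int probe = 1;
  unsigned char bytes[sizeof(int)];
  std::memcpy(bytes, &probe, sizeof(int));
  for (int i = 0; i < (int)sizeof(int); i++) {
    if (bytes[i] == 1) return i;  // 0 on little-endian, 3 on big-endian
  }
  return -1;  // unreachable for a probe value of 1
}

int main() {
  std::printf("low byte of an int lives at offset +%d\n", low_byte_offset_of_int());
  return 0;
}
```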

View File

@@ -920,7 +920,7 @@ void TemplateInterpreterGenerator::lock_method(void) {
__ add_monitor_to_stack(true, Z_ARG3, Z_ARG4, Z_ARG5); // Allocate monitor elem.
// Store object and lock it.
__ get_monitors(Z_tmp_1);
__ reg2mem_opt(object, Address(Z_tmp_1, BasicObjectLock::obj_offset_in_bytes()));
__ reg2mem_opt(object, Address(Z_tmp_1, BasicObjectLock::obj_offset()));
__ lock_object(Z_tmp_1, object);
BLOCK_COMMENT("} lock_method");
@@ -1118,7 +1118,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Load cp cache and save it at the end of this block.
__ z_lg(Z_R1_scratch, Address(const_method, ConstMethod::constants_offset()));
__ z_lg(Z_R1_scratch, Address(Z_R1_scratch, ConstantPool::cache_offset_in_bytes()));
__ z_lg(Z_R1_scratch, Address(Z_R1_scratch, ConstantPool::cache_offset()));
// z_ijava_state->method = method;
__ z_stg(Z_method, _z_ijava_state_neg(method), fp);
@@ -1601,7 +1601,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Reset handle block.
__ z_lg(Z_R1/*active_handles*/, thread_(active_handles));
__ clear_mem(Address(Z_R1, JNIHandleBlock::top_offset_in_bytes()), 4);
__ clear_mem(Address(Z_R1, JNIHandleBlock::top_offset()), 4);
// Handle exceptions (exception handling will handle unlocking!).
{

View File

@@ -1976,7 +1976,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
// Nmethod may have been invalidated (VM may block upon call_VM return).
__ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
__ z_cliy(in_bytes(nmethod::state_offset()), Z_RET, nmethod::in_use);
__ z_brne(dispatch);
// Migrate the interpreter frame off of the stack.
@@ -3555,7 +3555,7 @@ void TemplateTable::invokevirtual_helper(Register index,
__ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
__ mem2reg_opt(method,
Address(Z_tmp_2, index,
Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
Klass::vtable_start_offset() + vtableEntry::method_offset()));
__ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
__ jump_from_interpreted(method, Z_ARG4);
BLOCK_COMMENT("} invokevirtual_helper");
@@ -4178,7 +4178,7 @@ void TemplateTable::monitorenter() {
__ bind(loop);
// Check if current entry is used.
__ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
__ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset()));
__ z_brne(not_free);
// If not used then remember entry in Rfree_slot.
__ z_lgr(Rfree_slot, Rcurr_monitor);
@@ -4210,7 +4210,7 @@ void TemplateTable::monitorenter() {
__ add2reg(Z_bcp, 1, Z_bcp);
// Store object.
__ z_stg(Z_tos, BasicObjectLock::obj_offset_in_bytes(), Rfree_slot);
__ z_stg(Z_tos, in_bytes(BasicObjectLock::obj_offset()), Rfree_slot);
__ lock_object(Rfree_slot, Z_tos);
// Check to make sure this monitor doesn't cause stack overflow after locking.
@@ -4266,7 +4266,7 @@ void TemplateTable::monitorexit() {
__ bind(loop);
// Check if current entry is for same object.
__ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
__ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset()));
// If same object then stop searching.
__ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, found);
// Otherwise advance to next entry.

View File

@@ -110,7 +110,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index * vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
// Set method (in case of interpreted method), and destination address.
// Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.

View File

@@ -48,7 +48,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
verify_oop(obj);
// save object being locked into the BasicObjectLock
movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
movptr(Address(disp_hdr, BasicObjectLock::obj_offset()), obj);
null_check_offset = offset();
@@ -129,7 +129,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
}
// load object
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {

View File

@@ -232,7 +232,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
// set rtm_state to "no rtm" in MDO
mov_metadata(tmpReg, method_data);
lock();
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
orl(Address(tmpReg, MethodData::rtm_state_offset()), NoRTM);
}
jmpb(L_done);
bind(L_check_always_rtm1);
@@ -246,7 +246,7 @@ void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
// set rtm_state to "always rtm" in MDO
mov_metadata(tmpReg, method_data);
lock();
orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
orl(Address(tmpReg, MethodData::rtm_state_offset()), UseRTM);
}
bind(L_done);
}

View File

@@ -189,7 +189,7 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
jcc(Assembler::equal, do_profile);
get_method(tmp);
cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
cmpw(Address(tmp, Method::intrinsic_id_offset()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
jcc(Assembler::notEqual, profile_continue);
bind(do_profile);
@@ -510,8 +510,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
get_constant_pool(result);
// load pointer for resolved_references[] objArray
movptr(result, Address(result, ConstantPool::cache_offset_in_bytes()));
movptr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
movptr(result, Address(result, ConstantPool::cache_offset()));
movptr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
resolve_oop_handle(result, tmp);
load_heap_oop(result, Address(result, index,
UseCompressedOops ? Address::times_4 : Address::times_ptr,
@@ -526,7 +526,7 @@ void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
movw(index, Address(cpool, index, Address::times_ptr, sizeof(ConstantPool)));
Register resolved_klasses = cpool;
movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset_in_bytes()));
movptr(resolved_klasses, Address(cpool, ConstantPool::resolved_klasses_offset()));
movptr(klass, Address(resolved_klasses, index, Address::times_ptr, Array<Klass*>::base_offset_in_bytes()));
}
@@ -1038,7 +1038,7 @@ void InterpreterMacroAssembler::remove_activation(
// register for unlock_object to pass to VM directly
lea(robj, monitor); // address of first monitor
movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
movptr(rax, Address(robj, BasicObjectLock::obj_offset()));
testptr(rax, rax);
jcc(Assembler::notZero, unlock);
@@ -1121,7 +1121,7 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop);
// check if current entry is used
cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
cmpptr(Address(rmon, BasicObjectLock::obj_offset()), NULL_WORD);
jcc(Assembler::notEqual, exception);
addptr(rmon, entry_size); // otherwise advance to next entry
@@ -1208,8 +1208,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
const Register rklass_decode_tmp = rscratch1;
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
const int mark_offset = lock_offset +
BasicLock::displaced_header_offset_in_bytes();
@@ -1338,14 +1338,14 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
if (LockingMode != LM_LIGHTWEIGHT) {
// Convert from BasicObjectLock structure to object and BasicLock
// structure Store the BasicLock address into %rax
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
}
// Load oop into obj_reg(%c_rarg3)
movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
// Free entry
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
@@ -1388,7 +1388,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
bind(slow_case);
// Call the runtime routine for slow case.
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
movptr(Address(lock_reg, BasicObjectLock::obj_offset()), obj_reg); // restore obj
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
bind(done);

View File

@@ -95,12 +95,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
movptr(reg, Address(reg, ConstantPool::cache_offset()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
movptr(tags, Address(cpool, ConstantPool::tags_offset()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);

View File

@@ -4244,7 +4244,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int itentry_off = in_bytes(itableMethodEntry::method_offset());
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
Address::ScaleFactor times_vte_scale = Address::times_ptr;
@@ -4269,7 +4269,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
Label search, found_method;
for (int peel = 1; peel >= 0; peel--) {
movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
cmpptr(intf_klass, method_result);
if (peel) {
@@ -4295,7 +4295,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
if (return_method) {
// Got a hit.
movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
}
}
@@ -4305,11 +4305,11 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
const int base = in_bytes(Klass::vtable_start_offset());
const ByteSize base = Klass::vtable_start_offset();
assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
Address vtable_entry_addr(recv_klass,
vtable_index, Address::times_ptr,
base + vtableEntry::method_offset_in_bytes());
base + vtableEntry::method_offset());
movptr(method_result, vtable_entry_addr);
}
@@ -5116,7 +5116,7 @@ void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod)
void MacroAssembler::load_method_holder(Register holder, Register method) {
movptr(holder, Address(method, Method::const_offset())); // ConstMethod*
movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass*
}
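
load_method_holder above is three dependent loads; in C++ terms it chases Method to ConstMethod to ConstantPool to the pool holder (the defining InstanceKlass). A sketch with assumed struct layouts; only the pointer chain itself is the point.

```cpp
#include <cstdio>

// Illustrative layouts; the real offsets come from Method::const_offset(),
// ConstMethod::constants_offset(), and ConstantPool::pool_holder_offset().
struct InstanceKlassLike { const char* _name; };
struct ConstantPoolLike  { InstanceKlassLike* _pool_holder; };
struct ConstMethodLike   { ConstantPoolLike*  _constants; };
struct MethodLike        { ConstMethodLike*   _constmethod; };

InstanceKlassLike* load_method_holder(MethodLike* method) {
  ConstMethodLike*  cm = method->_constmethod;  // first movptr: ConstMethod*
  ConstantPoolLike* cp = cm->_constants;        // second movptr: ConstantPool*
  return cp->_pool_holder;                      // third movptr: InstanceKlass*
}

int main() {
  InstanceKlassLike k = {"java/lang/Object"};
  ConstantPoolLike cp = {&k};
  ConstMethodLike  cm = {&cp};
  MethodLike       m  = {&cm};
  std::printf("holder: %s\n", load_method_holder(&m)->_name);
  return 0;
}
```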
void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {

View File

@@ -260,7 +260,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ cmpw(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
__ cmpw(Address(rbx_method, Method::intrinsic_id_offset()), (int) iid);
__ jcc(Assembler::equal, L);
if (iid == vmIntrinsics::_linkToVirtual ||
iid == vmIntrinsics::_linkToSpecial) {

View File

@@ -1918,7 +1918,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// reset handle block
__ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
__ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
__ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
// Any exception pending?
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
@@ -2242,7 +2242,7 @@ void SharedRuntime::generate_deopt_blob() {
// Move the unpack kind to a safe place in the UnrollBlock because
// we are very short of registers
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset());
// retrieve the deopt kind from the UnrollBlock.
__ movl(rax, unpack_kind);
@ -2288,33 +2288,33 @@ void SharedRuntime::generate_deopt_blob() {
// when we are done the return to frame 3 will still be on the stack.
// Pop deoptimized frame
__ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
// sp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
#ifdef ASSERT
// Compilers generate code that bangs the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non-product builds.
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(rbx, rcx);
#endif
// Load array of frame pcs into ECX
__ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
__ pop(rsi); // trash the old pc
// Load array of frame sizes into ESI
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
__ movl(counter, rbx);
// Now adjust the caller's stack to make up for the extra locals
@ -2322,9 +2322,9 @@ void SharedRuntime::generate_deopt_blob() {
// frame and the stack walking of interpreter_sender will get the unextended sp
// value and not the "real" sp value.
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
__ movptr(sp_temp, rsp);
__ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
__ subptr(rsp, rbx);
// Push interpreter frames in a loop
@ -2492,7 +2492,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()),
(int32_t)Deoptimization::Unpack_uncommon_trap);
__ jcc(Assembler::equal, L);
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
@ -2511,34 +2511,34 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ addptr(rsp,(framesize-1)*wordSize); // Epilog!
// Pop deoptimized frame
__ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ addptr(rsp, rcx);
// sp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
#ifdef ASSERT
// Compilers generate code that bangs the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non-product builds.
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(rbx, rcx);
#endif
// Load array of frame pcs into ECX
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset()));
__ pop(rsi); // trash the pc
// Load array of frame sizes into ESI
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset());
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
__ movl(counter, rbx);
// Now adjust the caller's stack to make up for the extra locals
@ -2546,9 +2546,9 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// frame and the stack walking of interpreter_sender will get the unextended sp
// value and not the "real" sp value.
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset());
__ movptr(sp_temp, rsp);
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset()));
__ subptr(rsp, rbx);
// Push interpreter frames in a loop

View File

@ -2369,7 +2369,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// reset handle block
__ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
__ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
__ movl(Address(rcx, JNIHandleBlock::top_offset()), NULL_WORD);
// pop our frame
@ -2728,7 +2728,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
__ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
__ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()));
Label noException;
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
@ -2767,34 +2767,34 @@ void SharedRuntime::generate_deopt_blob() {
// when we are done the return to frame 3 will still be on the stack.
// Pop deoptimized frame
__ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset()));
__ addptr(rsp, rcx);
// rsp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
#ifdef ASSERT
// Compilers generate code that bangs the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non-product builds.
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(rbx, rcx);
#endif
// Load address of array of frame pcs into rcx
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset()));
// Trash the old pc
__ addptr(rsp, wordSize);
// Load address of array of frame sizes into rsi
__ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
__ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset()));
// Load counter into rdx
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@ -2806,7 +2806,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mov(sender_sp, rsp);
__ movl(rbx, Address(rdi,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes()));
caller_adjustment_offset()));
__ subptr(rsp, rbx);
// Push interpreter frames in a loop
@ -2947,7 +2947,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
__ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset()),
Deoptimization::Unpack_uncommon_trap);
__ jcc(Assembler::equal, L);
__ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
@ -2968,34 +2968,34 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Pop deoptimized frame (int)
__ movl(rcx, Address(rdi,
Deoptimization::UnrollBlock::
size_of_deoptimized_frame_offset_in_bytes()));
size_of_deoptimized_frame_offset()));
__ addptr(rsp, rcx);
// rsp should be pointing at the return address to the caller (3)
// Pick up the initial fp we should save
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset()));
#ifdef ASSERT
// Compilers generate code that bangs the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non-product builds.
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset()));
__ bang_stack_size(rbx, rcx);
#endif
// Load address of array of frame pcs into rcx (address*)
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset()));
// Trash the return pc
__ addptr(rsp, wordSize);
// Load address of array of frame sizes into rsi (intptr_t*)
__ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset_in_bytes()));
__ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset()));
// Counter
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int)
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset())); // (int)
// Now adjust the caller's stack to make up for the extra locals but
// record the original sp so that we can save it in the skeletal
@ -3005,7 +3005,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
const Register sender_sp = r8;
__ mov(sender_sp, rsp);
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int)
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset())); // (int)
__ subptr(rsp, rbx);
// Push interpreter frames in a loop

View File

@ -606,7 +606,7 @@ void TemplateInterpreterGenerator::lock_method() {
__ subptr(rsp, entry_size); // add space for a monitor entry
__ movptr(monitor_block_top, rsp); // set new monitor block top
// store object
__ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
__ movptr(Address(rsp, BasicObjectLock::obj_offset()), rax);
const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
__ movptr(lockreg, rsp); // object address
__ lock_object(lockreg);
@ -647,7 +647,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
__ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
__ movptr(rdx, Address(rdx, ConstantPool::cache_offset()));
__ push(rdx); // set constant pool cache
__ movptr(rax, rlocals);
@ -1154,7 +1154,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// reset handle block
__ movptr(t, Address(thread, JavaThread::active_handles_offset()));
__ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
__ movl(Address(t, JNIHandleBlock::top_offset()), NULL_WORD);
// If the result is an oop, unbox and store it in the frame where GC will see it
// and the result handler will pick it up
@ -1248,7 +1248,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// monitor expect in c_rarg1 for slow unlock path
__ lea(regmon, monitor); // address of first monitor
__ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
__ movptr(t, Address(regmon, BasicObjectLock::obj_offset()));
__ testptr(t, t);
__ jcc(Assembler::notZero, unlock);

View File

@ -4341,11 +4341,11 @@ void TemplateTable::monitorenter() {
__ bind(loop);
// check if current entry is used
__ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
__ cmpptr(Address(rtop, BasicObjectLock::obj_offset()), NULL_WORD);
// if not used then remember entry in rmon
__ cmovptr(Assembler::equal, rmon, rtop); // cmov => cmovptr
// check if current entry is for same object
__ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
__ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
// if same object then stop searching
__ jccb(Assembler::equal, exit);
// otherwise advance to next entry
@ -4394,7 +4394,7 @@ void TemplateTable::monitorenter() {
__ increment(rbcp);
// store object
__ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
__ movptr(Address(rmon, BasicObjectLock::obj_offset()), rax);
__ lock_object(rmon);
// check to make sure this monitor doesn't cause stack overflow after locking
@ -4434,7 +4434,7 @@ void TemplateTable::monitorexit() {
__ bind(loop);
// check if current entry is for same object
__ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
__ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset()));
// if same object then stop searching
__ jcc(Assembler::equal, found);
// otherwise advance to next entry

View File

@ -392,9 +392,9 @@ void ZeroFrame::identify_vp_word(int frame_index,
(BasicObjectLock *) monitor_base - 1 - index);
intptr_t offset = (intptr_t) addr - monitor;
if (offset == BasicObjectLock::obj_offset_in_bytes())
if (offset == in_bytes(BasicObjectLock::obj_offset()))
snprintf(fieldbuf, buflen, "monitor[%d]->_obj", index);
else if (offset == BasicObjectLock::lock_offset_in_bytes())
else if (offset == in_bytes(BasicObjectLock::lock_offset()))
snprintf(fieldbuf, buflen, "monitor[%d]->_lock", index);
return;

View File

@ -289,12 +289,12 @@ ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
check_monitor_index(index);
return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::lock_offset_in_bytes());
return sp_offset_for_monitor_base(index) + BasicObjectLock::lock_offset();
}
ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
check_monitor_index(index);
return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
return sp_offset_for_monitor_base(index) + BasicObjectLock::obj_offset();
}

View File

@ -303,8 +303,8 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool modules_defined() { return (_modules != nullptr); }
// Offsets
static ByteSize holder_offset() { return in_ByteSize(offset_of(ClassLoaderData, _holder)); }
static ByteSize keep_alive_offset() { return in_ByteSize(offset_of(ClassLoaderData, _keep_alive)); }
static ByteSize holder_offset() { return byte_offset_of(ClassLoaderData, _holder); }
static ByteSize keep_alive_offset() { return byte_offset_of(ClassLoaderData, _keep_alive); }
// Loaded class dictionary
Dictionary* dictionary() const { return _dictionary; }

View File

@ -699,9 +699,9 @@ public:
}
// support for code generation
static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
static int state_offset() { return offset_of(nmethod, _state); }
static ByteSize verified_entry_point_offset() { return byte_offset_of(nmethod, _verified_entry_point); }
static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
virtual void metadata_do(MetadataClosure* f);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "jfr/support/jfrTraceIdExtension.hpp"
#define DEFINE_KLASS_TRACE_ID_OFFSET \
static ByteSize trace_id_offset() { return in_ByteSize(offset_of(InstanceKlass, _trace_id)); }
static ByteSize trace_id_offset() { return byte_offset_of(InstanceKlass, _trace_id); }
#define KLASS_TRACE_ID_OFFSET InstanceKlass::trace_id_offset()

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,12 +31,12 @@
#define DEFINE_THREAD_LOCAL_FIELD_JFR mutable JfrThreadLocal _jfr_thread_local
#define DEFINE_THREAD_LOCAL_OFFSET_JFR \
static ByteSize jfr_thread_local_offset() { return in_ByteSize(offset_of(Thread, _jfr_thread_local)); }
static ByteSize jfr_thread_local_offset() { return byte_offset_of(Thread, _jfr_thread_local); }
#define THREAD_LOCAL_OFFSET_JFR Thread::jfr_thread_local_offset()
#define DEFINE_THREAD_LOCAL_TRACE_ID_OFFSET_JFR \
static ByteSize trace_id_offset() { return in_ByteSize(offset_of(JfrThreadLocal, _trace_id)); }
static ByteSize trace_id_offset() { return byte_offset_of(JfrThreadLocal, _trace_id); }
#define DEFINE_THREAD_LOCAL_ACCESSOR_JFR \
JfrThreadLocal* jfr_thread_local() const { return &_jfr_thread_local; }

View File

@ -243,23 +243,23 @@ JfrStackFrame* JfrThreadLocal::install_stackframes() const {
}
ByteSize JfrThreadLocal::java_event_writer_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
return byte_offset_of(JfrThreadLocal, _java_event_writer);
}
ByteSize JfrThreadLocal::vthread_id_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _vthread_id));
return byte_offset_of(JfrThreadLocal, _vthread_id);
}
ByteSize JfrThreadLocal::vthread_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _vthread));
return byte_offset_of(JfrThreadLocal, _vthread);
}
ByteSize JfrThreadLocal::vthread_epoch_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _vthread_epoch));
return byte_offset_of(JfrThreadLocal, _vthread_epoch);
}
ByteSize JfrThreadLocal::vthread_excluded_offset() {
return in_ByteSize(offset_of(JfrThreadLocal, _vthread_excluded));
return byte_offset_of(JfrThreadLocal, _vthread_excluded);
}
void JfrThreadLocal::set(bool* exclusion_field, bool state) {

View File

@ -449,7 +449,7 @@ C2V_VMENTRY_NULL(jobject, getResolvedJavaType0, (JNIEnv* env, jobject, jobject b
} else if (!compressed) {
if (JVMCIENV->isa_HotSpotConstantPool(base_object)) {
ConstantPool* cp = JVMCIENV->asConstantPool(base_object);
if (offset == ConstantPool::pool_holder_offset_in_bytes()) {
if (offset == in_bytes(ConstantPool::pool_holder_offset())) {
klass = cp->pool_holder();
} else {
base_desc = FormatBufferResource("[constant pool for %s]", cp->pool_holder()->signature_name());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,8 +69,8 @@ class CompiledICHolder : public CHeapObj<mtCompiler> {
Klass* holder_klass() const { return _holder_klass; }
Metadata* holder_metadata() const { return _holder_metadata; }
static int holder_metadata_offset() { return offset_of(CompiledICHolder, _holder_metadata); }
static int holder_klass_offset() { return offset_of(CompiledICHolder, _holder_klass); }
static ByteSize holder_metadata_offset() { return byte_offset_of(CompiledICHolder, _holder_metadata); }
static ByteSize holder_klass_offset() { return byte_offset_of(CompiledICHolder, _holder_klass); }
CompiledICHolder* next() { return _next; }
void set_next(CompiledICHolder* n) { _next = n; }

View File

@ -264,10 +264,10 @@ class ConstantPool : public Metadata {
}
// Assembly code support
static int tags_offset_in_bytes() { return offset_of(ConstantPool, _tags); }
static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); }
static int pool_holder_offset_in_bytes() { return offset_of(ConstantPool, _pool_holder); }
static int resolved_klasses_offset_in_bytes() { return offset_of(ConstantPool, _resolved_klasses); }
static ByteSize tags_offset() { return byte_offset_of(ConstantPool, _tags); }
static ByteSize cache_offset() { return byte_offset_of(ConstantPool, _cache); }
static ByteSize pool_holder_offset() { return byte_offset_of(ConstantPool, _pool_holder); }
static ByteSize resolved_klasses_offset() { return byte_offset_of(ConstantPool, _resolved_klasses); }
// Storing constants

View File

@ -452,8 +452,8 @@ class ConstantPoolCache: public MetaspaceObj {
}
// Assembly code support
static int resolved_references_offset_in_bytes() { return offset_of(ConstantPoolCache, _resolved_references); }
static ByteSize invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); }
static ByteSize resolved_references_offset() { return byte_offset_of(ConstantPoolCache, _resolved_references); }
static ByteSize invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); }
#if INCLUDE_CDS
void remove_unshareable_info();

View File

@ -547,7 +547,7 @@ public:
u2 this_class_index() const { return _this_class_index; }
void set_this_class_index(u2 index) { _this_class_index = index; }
static ByteSize reference_type_offset() { return in_ByteSize(offset_of(InstanceKlass, _reference_type)); }
static ByteSize reference_type_offset() { return byte_offset_of(InstanceKlass, _reference_type); }
// find local field, returns true if found
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
@ -866,9 +866,9 @@ public:
#endif
// support for stub routines
static ByteSize init_state_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_state)); }
static ByteSize init_state_offset() { return byte_offset_of(InstanceKlass, _init_state); }
JFR_ONLY(DEFINE_KLASS_TRACE_ID_OFFSET;)
static ByteSize init_thread_offset() { return in_ByteSize(offset_of(InstanceKlass, _init_thread)); }
static ByteSize init_thread_offset() { return byte_offset_of(InstanceKlass, _init_thread); }
// subclass/subinterface checks
bool implements_interface(Klass* k) const;

View File

@ -384,19 +384,19 @@ protected:
public:
// Compiler support
static ByteSize super_offset() { return in_ByteSize(offset_of(Klass, _super)); }
static ByteSize super_check_offset_offset() { return in_ByteSize(offset_of(Klass, _super_check_offset)); }
static ByteSize primary_supers_offset() { return in_ByteSize(offset_of(Klass, _primary_supers)); }
static ByteSize secondary_super_cache_offset() { return in_ByteSize(offset_of(Klass, _secondary_super_cache)); }
static ByteSize secondary_supers_offset() { return in_ByteSize(offset_of(Klass, _secondary_supers)); }
static ByteSize java_mirror_offset() { return in_ByteSize(offset_of(Klass, _java_mirror)); }
static ByteSize class_loader_data_offset() { return in_ByteSize(offset_of(Klass, _class_loader_data)); }
static ByteSize modifier_flags_offset() { return in_ByteSize(offset_of(Klass, _modifier_flags)); }
static ByteSize layout_helper_offset() { return in_ByteSize(offset_of(Klass, _layout_helper)); }
static ByteSize access_flags_offset() { return in_ByteSize(offset_of(Klass, _access_flags)); }
static ByteSize super_offset() { return byte_offset_of(Klass, _super); }
static ByteSize super_check_offset_offset() { return byte_offset_of(Klass, _super_check_offset); }
static ByteSize primary_supers_offset() { return byte_offset_of(Klass, _primary_supers); }
static ByteSize secondary_super_cache_offset() { return byte_offset_of(Klass, _secondary_super_cache); }
static ByteSize secondary_supers_offset() { return byte_offset_of(Klass, _secondary_supers); }
static ByteSize java_mirror_offset() { return byte_offset_of(Klass, _java_mirror); }
static ByteSize class_loader_data_offset() { return byte_offset_of(Klass, _class_loader_data); }
static ByteSize modifier_flags_offset() { return byte_offset_of(Klass, _modifier_flags); }
static ByteSize layout_helper_offset() { return byte_offset_of(Klass, _layout_helper); }
static ByteSize access_flags_offset() { return byte_offset_of(Klass, _access_flags); }
#if INCLUDE_JVMCI
static ByteSize subklass_offset() { return in_ByteSize(offset_of(Klass, _subklass)); }
static ByteSize next_sibling_offset() { return in_ByteSize(offset_of(Klass, _next_sibling)); }
static ByteSize subklass_offset() { return byte_offset_of(Klass, _subklass); }
static ByteSize next_sibling_offset() { return byte_offset_of(Klass, _next_sibling); }
#endif
// Unpacking layout_helper:

View File

@ -184,7 +184,7 @@ class vtableEntry {
static int size() { return sizeof(vtableEntry) / wordSize; }
static int size_in_bytes() { return sizeof(vtableEntry); }
static int method_offset_in_bytes() { return offset_of(vtableEntry, _method); }
static ByteSize method_offset() { return byte_offset_of(vtableEntry, _method); }
Method* method() const { return _method; }
Method** method_addr() { return &_method; }
@ -230,9 +230,9 @@ class itableOffsetEntry {
void initialize(InstanceKlass* interf, int offset) { _interface = interf; _offset = offset; }
// Static size and offset accessors
static int size() { return sizeof(itableOffsetEntry) / wordSize; } // size in words
static int interface_offset_in_bytes() { return offset_of(itableOffsetEntry, _interface); }
static int offset_offset_in_bytes() { return offset_of(itableOffsetEntry, _offset); }
static int size() { return sizeof(itableOffsetEntry) / wordSize; } // size in words
static ByteSize interface_offset() { return byte_offset_of(itableOffsetEntry, _interface); }
static ByteSize offset_offset() { return byte_offset_of(itableOffsetEntry, _offset); }
friend class klassItable;
};
@ -252,7 +252,7 @@ class itableMethodEntry {
// Static size and offset accessors
static int size() { return sizeof(itableMethodEntry) / wordSize; } // size in words
static int method_offset_in_bytes() { return offset_of(itableMethodEntry, _method); }
static ByteSize method_offset() { return byte_offset_of(itableMethodEntry, _method); }
friend class klassItable;
};

View File

@ -666,9 +666,7 @@ public:
static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); }
static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); }
static ByteSize code_offset() { return byte_offset_of(Method, _code); }
static ByteSize method_data_offset() {
return byte_offset_of(Method, _method_data);
}
static ByteSize method_counters_offset() {
return byte_offset_of(Method, _method_counters);
}
@ -682,8 +680,8 @@ public:
static ByteSize itable_index_offset() { return byte_offset_of(Method, _vtable_index ); }
// for code generation
static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
static ByteSize method_data_offset() { return byte_offset_of(Method, _method_data); }
static ByteSize intrinsic_id_offset() { return byte_offset_of(Method, _intrinsic_id); }
static int intrinsic_id_size_in_bytes() { return sizeof(u2); }
// Static methods that are used to implement member methods where an exposed this pointer

View File

@ -2253,8 +2253,8 @@ public:
Atomic::store(&_rtm_state, (int)rstate);
}
static int rtm_state_offset_in_bytes() {
return offset_of(MethodData, _rtm_state);
static ByteSize rtm_state_offset() {
return byte_offset_of(MethodData, _rtm_state);
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ class ObjArrayKlass : public ArrayKlass {
PackageEntry* package() const;
// Compiler/Interpreter offset
static ByteSize element_klass_offset() { return in_ByteSize(offset_of(ObjArrayKlass, _element_klass)); }
static ByteSize element_klass_offset() { return byte_offset_of(ObjArrayKlass, _element_klass); }
// Dispatched operation
bool can_be_primary_super_slow() const;

View File

@ -309,8 +309,8 @@ class oopDesc {
static bool has_klass_gap();
// for code generation
static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); }
static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); }
static int mark_offset_in_bytes() { return (int)offset_of(oopDesc, _mark); }
static int klass_offset_in_bytes() { return (int)offset_of(oopDesc, _metadata._klass); }
static int klass_gap_offset_in_bytes() {
assert(has_klass_gap(), "only applicable to compressed klass pointers");
return klass_offset_in_bytes() + sizeof(narrowKlass);
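
The two (int) casts above are the heart of this change set: HotSpot's offset_of expands to pointer arithmetic whose result is size_t, and returning that implicitly as int is what -Wconversion flags on LP64 platforms. A standalone sketch of the pattern, with offset_of modeled loosely on the HotSpot macro (hypothetical code; compile with -Wconversion to see the difference):

#include <cstddef>
#include <cstdint>

// Modeled loosely on HotSpot's offset_of; the (klass*)16 trick sidesteps
// offsetof's restrictions on non-standard-layout types.
#define offset_of(klass, field) \
  (size_t)((intptr_t)&(((klass*)16)->field) - 16)

struct OopSketch { uintptr_t _mark; void* _klass; };

static int mark_offset_warns() { return offset_of(OopSketch, _mark); }       // size_t -> int: warns
static int mark_offset_clean() { return (int)offset_of(OopSketch, _mark); }  // explicit cast: quiet

int main() { return mark_offset_warns() + mark_offset_clean(); }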

View File

@ -4237,7 +4237,7 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
// Get the Method* out of the appropriate vtable entry.
int entry_offset = in_bytes(Klass::vtable_start_offset()) +
vtable_index*vtableEntry::size_in_bytes() +
vtableEntry::method_offset_in_bytes();
in_bytes(vtableEntry::method_offset());
Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
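
As a worked instance of the displacement computed above, with numbers chosen purely for illustration (LP64, so a vtableEntry is one 8-byte word, and a made-up 400-byte vtable start; only method_off == 0 follows from vtableEntry holding a single _method field):

constexpr int vtable_start = 400;  // stand-in for in_bytes(Klass::vtable_start_offset())
constexpr int entry_size   = 8;    // vtableEntry::size_in_bytes() on LP64
constexpr int method_off   = 0;    // in_bytes(vtableEntry::method_offset())
constexpr int vtable_index = 5;
constexpr int entry_offset = vtable_start + vtable_index * entry_size + method_off;
static_assert(entry_offset == 440, "Method* for index 5 sits 440 bytes into the Klass");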

View File

@ -2171,7 +2171,7 @@ void Parse::rtm_deopt() {
// Load the rtm_state from the MethodData.
const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
Node* mdo = makecon(adr_type);
int offset = MethodData::rtm_state_offset_in_bytes();
int offset = in_bytes(MethodData::rtm_state_offset());
Node* adr_node = basic_plus_adr(mdo, mdo, offset);
Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "oops/markWord.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "utilities/sizes.hpp"
class BasicLock {
friend class VMStructs;
@ -48,7 +49,7 @@ class BasicLock {
// move a basic lock (used during deoptimization)
void move_to(oop obj, BasicLock* dest);
static int displaced_header_offset_in_bytes() { return offset_of(BasicLock, _displaced_header); }
static int displaced_header_offset_in_bytes() { return (int)offset_of(BasicLock, _displaced_header); }
};
// A BasicObjectLock associates a specific Java object with a BasicLock.
@ -79,8 +80,8 @@ class BasicObjectLock {
// GC support
void oops_do(OopClosure* f) { f->do_oop(&_obj); }
static int obj_offset_in_bytes() { return offset_of(BasicObjectLock, _obj); }
static int lock_offset_in_bytes() { return offset_of(BasicObjectLock, _lock); }
static ByteSize obj_offset() { return byte_offset_of(BasicObjectLock, _obj); }
static ByteSize lock_offset() { return byte_offset_of(BasicObjectLock, _lock); }
};

View File

@ -264,16 +264,16 @@ class Deoptimization : AllStatic {
int caller_actual_parameters() const { return _caller_actual_parameters; }
// Accessors used by the code generator for the unpack stub.
static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); }
static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); }
static int number_of_frames_offset_in_bytes() { return offset_of(UnrollBlock, _number_of_frames); }
static int frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _frame_sizes); }
static int total_frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _total_frame_sizes); }
static int frame_pcs_offset_in_bytes() { return offset_of(UnrollBlock, _frame_pcs); }
static int counter_temp_offset_in_bytes() { return offset_of(UnrollBlock, _counter_temp); }
static int initial_info_offset_in_bytes() { return offset_of(UnrollBlock, _initial_info); }
static int unpack_kind_offset_in_bytes() { return offset_of(UnrollBlock, _unpack_kind); }
static int sender_sp_temp_offset_in_bytes() { return offset_of(UnrollBlock, _sender_sp_temp); }
static ByteSize size_of_deoptimized_frame_offset() { return byte_offset_of(UnrollBlock, _size_of_deoptimized_frame); }
static ByteSize caller_adjustment_offset() { return byte_offset_of(UnrollBlock, _caller_adjustment); }
static ByteSize number_of_frames_offset() { return byte_offset_of(UnrollBlock, _number_of_frames); }
static ByteSize frame_sizes_offset() { return byte_offset_of(UnrollBlock, _frame_sizes); }
static ByteSize total_frame_sizes_offset() { return byte_offset_of(UnrollBlock, _total_frame_sizes); }
static ByteSize frame_pcs_offset() { return byte_offset_of(UnrollBlock, _frame_pcs); }
static ByteSize counter_temp_offset() { return byte_offset_of(UnrollBlock, _counter_temp); }
static ByteSize initial_info_offset() { return byte_offset_of(UnrollBlock, _initial_info); }
static ByteSize unpack_kind_offset() { return byte_offset_of(UnrollBlock, _unpack_kind); }
static ByteSize sender_sp_temp_offset() { return byte_offset_of(UnrollBlock, _sender_sp_temp); }
BasicType return_type() const { return _return_type; }
void print();

View File

@ -178,7 +178,7 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
void set_pop_frame_link(JNIHandleBlock* block) { _pop_frame_link = block; }
// Stub generator support
static int top_offset_in_bytes() { return offset_of(JNIHandleBlock, _top); }
static ByteSize top_offset() { return byte_offset_of(JNIHandleBlock, _top); }
// Garbage collection support
// Traversal of handles

View File

@ -217,13 +217,11 @@ private:
static int Knob_SpinLimit;
// TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
// ByteSize would also be an appropriate type.
static int owner_offset_in_bytes() { return offset_of(ObjectMonitor, _owner); }
static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); }
static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); }
static ByteSize cxq_offset() { return byte_offset_of(ObjectMonitor, _cxq); }
static ByteSize succ_offset() { return byte_offset_of(ObjectMonitor, _succ); }
static ByteSize EntryList_offset() { return byte_offset_of(ObjectMonitor, _EntryList); }
// ObjectMonitor references can be ORed with markWord::monitor_value
// as part of the ObjectMonitor tagging mechanism. When we combine an
@ -237,7 +235,7 @@ private:
// to the ObjectMonitor reference manipulation code:
//
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
((ObjectMonitor::f ## _offset_in_bytes()) - markWord::monitor_value)
((in_bytes(ObjectMonitor::f ## _offset())) - markWord::monitor_value)
markWord header() const;
volatile markWord* header_addr();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,9 @@ constexpr ByteSize operator + (ByteSize x, ByteSize y) { return in_ByteSize(in_b
constexpr ByteSize operator - (ByteSize x, ByteSize y) { return in_ByteSize(in_bytes(x) - in_bytes(y)); }
constexpr ByteSize operator * (ByteSize x, int y) { return in_ByteSize(in_bytes(x) * y ); }
constexpr bool operator == (ByteSize x, int y) { return in_bytes(x) == y; }
constexpr bool operator != (ByteSize x, int y) { return in_bytes(x) != y; }
// Use the following #define to get C++ field member offsets
#define byte_offset_of(klass,field) in_ByteSize((int)offset_of(klass, field))
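
Since sizes.hpp is the pivot of the whole change, a standalone model of the pattern may help. This sketch uses an enum class stand-in (the real ByteSize has more operators and a debug-build history) to show how the typed wrapper keeps every conversion to int explicit via in_bytes():

#include <cstddef>

enum class ByteSize : int {};

constexpr ByteSize in_ByteSize(int s)   { return static_cast<ByteSize>(s); }
constexpr int      in_bytes(ByteSize x) { return static_cast<int>(x); }

constexpr ByteSize operator + (ByteSize x, ByteSize y) { return in_ByteSize(in_bytes(x) + in_bytes(y)); }
constexpr bool     operator == (ByteSize x, int y)     { return in_bytes(x) == y; }
constexpr bool     operator != (ByteSize x, int y)     { return in_bytes(x) != y; }

#define byte_offset_of(klass, field) in_ByteSize((int)offsetof(klass, field))

struct PoolSketch { void* _tags; void* _cache; };

static_assert(byte_offset_of(PoolSketch, _cache) != 0, "typed offset, explicit comparison");
static_assert(in_bytes(byte_offset_of(PoolSketch, _tags) + byte_offset_of(PoolSketch, _cache))
              == (int)sizeof(void*), "arithmetic stays typed until in_bytes()");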

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ TEST_VM(ObjectMonitor, sanity) {
<< sizeof (PaddedEnd<ObjectMonitor>)
<< "; cache_line_size = " << cache_line_size;
EXPECT_GE((size_t) ObjectMonitor::owner_offset_in_bytes(), cache_line_size)
EXPECT_GE((size_t) in_bytes(ObjectMonitor::owner_offset()), cache_line_size)
<< "the _header and _owner fields are closer "
<< "than a cache line which permits false sharing.";
}
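
The assertion above is a false-sharing guard: _owner is CASed by contending threads, so it must not share a cache line with the monitor's frequently read header. A hedged illustration of the layout idea (64-byte line assumed; this is not the real ObjectMonitor padding scheme):

#include <cstddef>

const size_t cache_line = 64;  // assumption; the real test queries the platform value

struct PaddedMonitorSketch {
  void* _header;
  char  _pad[cache_line - sizeof(void*)];  // push _owner off _header's cache line
  void* _owner;
};

static_assert(offsetof(PaddedMonitorSketch, _owner) >= cache_line,
              "same property the gtest checks: owner_offset() >= cache_line_size");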