8320697: RISC-V: Small refactoring for runtime calls

Co-authored-by: Fei Yang <fyang@openjdk.org>
Reviewed-by: fyang, rehn
Feilong Jiang 2023-12-05 07:05:57 +00:00
parent 50d1839d54
commit aec386596d
15 changed files with 116 additions and 227 deletions

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
__ relocate(safepoint_pc.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, safepoint_pc.target(), offset);
__ la(t0, safepoint_pc.target(), offset);
__ addi(t0, t0, offset);
});
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
@@ -92,12 +92,9 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ mv(t1, _array->as_pointer_register());
stub_id = Runtime1::throw_range_check_failed_id;
}
RuntimeAddress target(Runtime1::entry_for(stub_id));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(ra, target, offset);
__ jalr(ra, ra, offset);
});
// t0 and t1 are used as args in generate_exception_throw
// so use ra as the tmp register for rt_call.
__ rt_call(Runtime1::entry_for(stub_id), ra);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());

View File

@@ -1425,7 +1425,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
InternalAddress pc_for_athrow(__ pc());
__ relocate(pc_for_athrow.rspec(), [&] {
int32_t offset;
__ la_patchable(exceptionPC->as_register(), pc_for_athrow, offset);
__ la(exceptionPC->as_register(), pc_for_athrow.target(), offset);
__ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset);
});
add_call_info(pc_for_athrow_offset, info); // for exception handler
@@ -1868,7 +1868,7 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
RuntimeAddress target(dest);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ movptr(t0, target.target(), offset);
__ jalr(x1, t0, offset);
});
}

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,12 +67,7 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres
set_last_Java_frame(sp, fp, retaddr, t0);
// do the call
RuntimeAddress target(entry);
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t0, target, offset);
jalr(x1, t0, offset);
});
rt_call(entry);
bind(retaddr);
int call_offset = offset();
// verify callee-saved register
@@ -578,12 +573,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
Label retaddr;
__ set_last_Java_frame(sp, fp, retaddr, t0);
// do the call
RuntimeAddress addr(target);
__ relocate(addr.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, addr, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(target);
__ bind(retaddr);
OopMapSet* oop_maps = new OopMapSet();
assert_cond(oop_maps != nullptr);

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
InternalAddress safepoint_pc(__ pc() - __ offset() + _safepoint_offset);
__ relocate(safepoint_pc.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, safepoint_pc.target(), offset);
__ la(t0, safepoint_pc.target(), offset);
__ addi(t0, t0, offset);
});
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
@@ -60,12 +60,7 @@ int C2EntryBarrierStub::max_size() const {
void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
__ bind(entry());
RuntimeAddress target(StubRoutines::method_entry_barrier());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(ra, t0, offset);
});
__ rt_call(StubRoutines::method_entry_barrier());
__ j(continuation());

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,7 +187,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
Label done;
Label runtime;
@@ -204,7 +203,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// storing region crossing non-null, is card already dirty?
ExternalAddress cardtable((address) ct->byte_map_base());
const Register card_addr = tmp1;
__ srli(card_addr, store_addr, CardTable::card_shift());
@@ -410,7 +408,6 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
Label done;
Label runtime;

View File

@@ -308,12 +308,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
Label skip_barrier;
__ beq(t0, t1, skip_barrier);
RuntimeAddress target(StubRoutines::method_entry_barrier());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(ra, t0, offset);
});
__ rt_call(StubRoutines::method_entry_barrier());
__ j(skip_barrier);

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -339,12 +339,8 @@ void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, X
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);
Address target(stub->slow_path());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ mv(t0, stub->slow_path());
__ jalr(t0);
}
// Stub exit

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,7 @@
int InlineCacheBuffer::ic_stub_code_size() {
// 6: auipc + ld + auipc + jalr + address(2 * instruction_size)
// 5: auipc + ld + j + address(2 * instruction_size)
return (MacroAssembler::far_branches() ? 6 : 5) * NativeInstruction::instruction_size;
return 6 * NativeInstruction::instruction_size;
}
#define __ masm->

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,7 @@ void InterpreterMacroAssembler::get_dispatch() {
ExternalAddress target((address)Interpreter::dispatch_table());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(xdispatch, target, offset);
la(xdispatch, target.target(), offset);
addi(xdispatch, xdispatch, offset);
});
}

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Address target(SafepointSynchronize::safepoint_counter_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(rcounter_addr, target, offset);
__ la(rcounter_addr, target.target(), offset);
__ addi(rcounter_addr, rcounter_addr, offset);
});
@@ -96,7 +96,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(result, target, offset);
__ la(result, target.target(), offset);
__ lwu(result, Address(result, offset));
});
__ bnez(result, slow);
@@ -176,7 +176,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
ExternalAddress target(slow_case_addr);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ jalr(x1, t0, offset);
});
__ leave();

View File

@@ -340,7 +340,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
RuntimeAddress target(StubRoutines::forward_exception_entry());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t0, target, offset);
la(t0, target.target(), offset);
jalr(x0, t0, offset);
});
bind(ok);
@@ -421,7 +421,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t1, target, offset);
la(t1, target.target(), offset);
ld(t1, Address(t1, offset));
});
jalr(t1);
@@ -466,7 +466,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t1, target, offset);
la(t1, target.target(), offset);
ld(t1, Address(t1, offset));
});
jalr(t1);
@@ -717,13 +717,35 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
MacroAssembler::call_VM_leaf_base(entry_point, 4);
}
void MacroAssembler::la(Register Rd, const address dest) {
int64_t offset = dest - pc();
void MacroAssembler::la(Register Rd, const address addr) {
int64_t offset = addr - pc();
if (is_simm32(offset)) {
auipc(Rd, (int32_t)offset + 0x800); //0x800, Note:the 11th sign bit
addi(Rd, Rd, ((int64_t)offset << 52) >> 52);
} else {
movptr(Rd, dest);
movptr(Rd, addr);
}
}
void MacroAssembler::la(Register Rd, const address addr, int32_t &offset) {
assert((uintptr_t)addr < (1ull << 48), "bad address");
unsigned long target_address = (uintptr_t)addr;
unsigned long low_address = (uintptr_t)CodeCache::low_bound();
unsigned long high_address = (uintptr_t)CodeCache::high_bound();
long offset_low = target_address - low_address;
long offset_high = target_address - high_address;
// RISC-V doesn't compute a page-aligned address, in order to partially
// compensate for the use of *signed* offsets in its base+disp12
// addressing mode (RISC-V's PC-relative reach remains asymmetric
// [-(2G + 2K), 2G - 2K).
if (offset_high >= -((1L << 31) + (1L << 11)) && offset_low < (1L << 31) - (1L << 11)) {
int64_t distance = addr - pc();
auipc(Rd, (int32_t)distance + 0x800);
offset = ((int32_t)distance << 20) >> 20;
} else {
movptr(Rd, addr, offset);
}
}
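
A minimal standalone sketch (plain C++, not HotSpot code) of the hi20/lo12 split that the auipc-based sequences above rely on: auipc adds the upper 20 bits of the distance (shifted left by 12) to the pc, and the following addi/ld/jalr supplies a sign-extended 12-bit low part, so the high part is rounded by 0x800 to let the signed low part correct it. The sample distances are arbitrary, and the sketch assumes the usual arithmetic right shift for signed values.

#include <cassert>
#include <cstdint>

int main() {
  // Arbitrary pc-relative distances; the same split works for any simm32 value.
  const int64_t distances[] = {0, 0x7ff, 0x800, -0x800, 0x12345678, -0x12345678};
  for (int64_t distance : distances) {
    int64_t hi20 = (distance + 0x800) >> 12;   // immediate handed to auipc
    int64_t lo12 = distance - (hi20 << 12);    // immediate handed to addi/ld/jalr
    assert(lo12 >= -0x800 && lo12 <= 0x7ff);   // fits the signed 12-bit field
    assert((hi20 << 12) + lo12 == distance);   // pc + auipc part + low part hits the target
  }
  return 0;
}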
@@ -1564,7 +1586,7 @@ void MacroAssembler::reinit_heapbase() {
ExternalAddress target(CompressedOops::ptrs_base_addr());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(xheapbase, target, offset);
la(xheapbase, target.target(), offset);
ld(xheapbase, Address(xheapbase, offset));
});
}
@@ -2119,7 +2141,7 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value
ExternalAddress target((address)flag_addr);
_masm->relocate(target.rspec(), [&] {
int32_t offset;
_masm->la_patchable(t0, target, offset);
_masm->la(t0, target.target(), offset);
_masm->lbu(t0, Address(t0, offset));
});
if (value) {
@@ -2996,46 +3018,36 @@ ATOMIC_XCHGU(xchgalwu, xchgalw)
#undef ATOMIC_XCHGU
void MacroAssembler::far_jump(Address entry, Register tmp) {
void MacroAssembler::far_jump(const Address &entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type
|| entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
IncompressibleRegion ir(this); // Fixed length: see MacroAssembler::far_branch_size()
if (far_branches()) {
// We can use auipc + jalr here because we know that the total size of
// the code cache cannot exceed 2Gb.
relocate(entry.rspec(), [&] {
int32_t offset;
la_patchable(tmp, entry, offset);
jalr(x0, tmp, offset);
});
} else {
j(entry);
}
// Fixed length: see MacroAssembler::far_branch_size()
relocate(entry.rspec(), [&] {
int32_t offset;
la(tmp, entry.target(), offset);
jalr(x0, tmp, offset);
});
}
void MacroAssembler::far_call(Address entry, Register tmp) {
void MacroAssembler::far_call(const Address &entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type
|| entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
IncompressibleRegion ir(this); // Fixed length: see MacroAssembler::far_branch_size()
if (far_branches()) {
// We can use auipc + jalr here because we know that the total size of
// the code cache cannot exceed 2Gb.
relocate(entry.rspec(), [&] {
int32_t offset;
la_patchable(tmp, entry, offset);
jalr(x1, tmp, offset); // link
});
} else {
jal(entry); // link
}
// Fixed length: see MacroAssembler::far_branch_size()
// We can use auipc + jalr here because we know that the total size of
// the code cache cannot exceed 2Gb.
relocate(entry.rspec(), [&] {
int32_t offset;
la(tmp, entry.target(), offset);
jalr(x1, tmp, offset); // link
});
}
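
The "cannot exceed 2Gb" remark above is what makes this unconditional two-instruction sequence safe. A standalone sketch (hypothetical bounds, not HotSpot code) of the reach check that la() performs and that far_call/far_jump therefore always satisfy:

#include <cassert>
#include <cstdint>

// True if target is reachable from pc with auipc (hi20) + jalr (signed lo12),
// i.e. within the asymmetric window [-(2G + 2K), 2G - 2K) noted in la() above.
bool in_auipc_jalr_range(int64_t pc, int64_t target) {
  int64_t distance = target - pc;
  return distance >= -((1LL << 31) + (1LL << 11)) &&
         distance <   (1LL << 31) - (1LL << 11);
}

int main() {
  const int64_t cache_size = 2047LL * 1024 * 1024;  // hypothetical ReservedCodeCacheSize < 2G
  const int64_t low  = 0x100000000LL;               // hypothetical code cache bounds
  const int64_t high = low + cache_size;
  // Worst cases: a branch from one end of the code cache to the other, both directions.
  assert(in_auipc_jalr_range(low, high - 4));
  assert(in_auipc_jalr_range(high - 4, low));
  return 0;
}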
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
@@ -3258,29 +3270,6 @@ void MacroAssembler::load_byte_map_base(Register reg) {
mv(reg, (uint64_t)byte_map_base);
}
void MacroAssembler::la_patchable(Register reg1, const Address &dest, int32_t &offset) {
unsigned long low_address = (uintptr_t)CodeCache::low_bound();
unsigned long high_address = (uintptr_t)CodeCache::high_bound();
unsigned long dest_address = (uintptr_t)dest.target();
long offset_low = dest_address - low_address;
long offset_high = dest_address - high_address;
assert(dest.getMode() == Address::literal, "la_patchable must be applied to a literal address");
assert((uintptr_t)dest.target() < (1ull << 48), "bad address");
// RISC-V doesn't compute a page-aligned address, in order to partially
// compensate for the use of *signed* offsets in its base+disp12
// addressing mode (RISC-V's PC-relative reach remains asymmetric
// [-(2G + 2K), 2G - 2K).
if (offset_high >= -((1L << 31) + (1L << 11)) && offset_low < (1L << 31) - (1L << 11)) {
int64_t distance = dest.target() - pc();
auipc(reg1, (int32_t)distance + 0x800);
offset = ((int32_t)distance << 20) >> 20;
} else {
movptr(reg1, dest.target(), offset);
}
}
void MacroAssembler::build_frame(int framesize) {
assert(framesize >= 2, "framesize must include space for FP/RA");
assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
@@ -3307,21 +3296,16 @@ void MacroAssembler::reserved_stack_check() {
enter(); // RA and FP are live.
mv(c_rarg0, xthread);
RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t0, target, offset);
jalr(x1, t0, offset);
});
rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
leave();
// We have already removed our own frame.
// throw_delayed_StackOverflowError will think that it's been
// called by our caller.
target = RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry());
RuntimeAddress target(StubRoutines::throw_delayed_StackOverflowError_entry());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(t0, target, offset);
movptr(t0, target.target(), offset);
jalr(x0, t0, offset);
});
should_not_reach_here();
@@ -3383,21 +3367,19 @@ address MacroAssembler::trampoline_call(Address entry) {
address target = entry.target();
// We need a trampoline if branches are far.
if (far_branches()) {
if (!in_scratch_emit_size()) {
if (entry.rspec().type() == relocInfo::runtime_call_type) {
assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
code()->share_trampoline_for(entry.target(), offset());
} else {
address stub = emit_trampoline_stub(offset(), target);
if (stub == nullptr) {
postcond(pc() == badAddress);
return nullptr; // CodeCache is full
}
if (!in_scratch_emit_size()) {
if (entry.rspec().type() == relocInfo::runtime_call_type) {
assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
code()->share_trampoline_for(entry.target(), offset());
} else {
address stub = emit_trampoline_stub(offset(), target);
if (stub == nullptr) {
postcond(pc() == badAddress);
return nullptr; // CodeCache is full
}
}
target = pc();
}
target = pc();
address call_pc = pc();
#ifdef ASSERT
@@ -3545,7 +3527,7 @@ void MacroAssembler::cmpptr(Register src1, Address src2, Label& equal) {
assert_different_registers(src1, t0);
relocate(src2.rspec(), [&] {
int32_t offset;
la_patchable(t0, src2, offset);
la(t0, src2.target(), offset);
ld(t0, Address(t0, offset));
});
beq(src1, t0, equal);
@@ -4193,7 +4175,7 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
Label around, done, done16;
bltu(cnt, t0, around);
{
RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::riscv::zero_blocks());
RuntimeAddress zero_blocks(StubRoutines::riscv::zero_blocks());
assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");
if (StubRoutines::riscv::complete()) {
address tpc = trampoline_call(zero_blocks);
@@ -4788,11 +4770,11 @@ void MacroAssembler::rt_call(address dest, Register tmp) {
CodeBlob *cb = CodeCache::find_blob(dest);
RuntimeAddress target(dest);
if (cb) {
far_call(target);
far_call(target, tmp);
} else {
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(tmp, target, offset);
movptr(tmp, target.target(), offset);
jalr(x1, tmp, offset);
});
}

View File

@@ -718,7 +718,8 @@ public:
compare_and_branch_label_insn neg_insn, bool is_far = false);
void la(Register Rd, Label &label);
void la(Register Rd, const address dest);
void la(Register Rd, const address addr);
void la(Register Rd, const address addr, int32_t &offset);
void la(Register Rd, const Address &adr);
void li16u(Register Rd, uint16_t imm);
@@ -1062,28 +1063,18 @@ public:
void atomic_xchgwu(Register prev, Register newv, Register addr);
void atomic_xchgalwu(Register prev, Register newv, Register addr);
static bool far_branches() {
return ReservedCodeCacheSize > branch_range;
}
// Emit a direct call/jump if the entry address will always be in range,
// otherwise a far call/jump.
// Emit a far call/jump. Only invalidates the tmp register which
// is used to keep the entry address for jalr.
// The address must be inside the code cache.
// Supported entry.rspec():
// - relocInfo::external_word_type
// - relocInfo::runtime_call_type
// - relocInfo::none
// In the case of a far call/jump, the entry address is put in the tmp register.
// The tmp register is invalidated.
void far_call(Address entry, Register tmp = t0);
void far_jump(Address entry, Register tmp = t0);
void far_call(const Address &entry, Register tmp = t0);
void far_jump(const Address &entry, Register tmp = t0);
static int far_branch_size() {
if (far_branches()) {
return 2 * 4; // auipc + jalr, see far_call() & far_jump()
} else {
return 4;
}
}
void load_byte_map_base(Register reg);
@@ -1095,8 +1086,6 @@ public:
sd(zr, Address(t0));
}
void la_patchable(Register reg1, const Address &dest, int32_t &offset);
virtual void _call_Unimplemented(address call_site) {
mv(t1, call_site);
}
@@ -1430,6 +1419,8 @@ public:
VMRegPair dst,
bool is_receiver,
int* receiver_offset);
// Emit a runtime call. Only invalidates the tmp register which
// is used to keep the entry address for jalr/movptr.
void rt_call(address dest, Register tmp = t0);
void call(const address dest, Register temp = t0) {
@@ -1469,7 +1460,7 @@ private:
InternalAddress target(const_addr.target());
relocate(target.rspec(), [&] {
int32_t offset;
la_patchable(dest, target, offset);
la(dest, target.target(), offset);
ld(dest, Address(dest, offset));
});
}
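
Taken together, the header now exposes far_call/far_jump and rt_call with one shared contract: only the tmp register (t0 by default) is clobbered to hold the entry address. A hedged usage sketch, mirroring the call sites changed elsewhere in this commit (the enclosing generator function is hypothetical):

void generate_some_stub(MacroAssembler* masm) {   // hypothetical stub generator
  // Runtime call: the target may live outside the code cache, so rt_call either
  // goes through far_call or materializes the address with movptr + jalr.
  masm->rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));

  // Far call: the target must be inside the code cache; emits a fixed auipc + jalr.
  masm->far_call(RuntimeAddress(StubRoutines::method_entry_barrier()));
}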

View File

@@ -1848,10 +1848,8 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
// la_patchable t0, #exception_blob_entry_point
// auipc t0, #exception_blob_entry_point
// jr (offset)t0
// or
// j #exception_blob_entry_point
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);

View File

@@ -344,12 +344,7 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ mv(c_rarg0, xmethod);
__ mv(c_rarg1, ra);
RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
__ pop_CPU_state();
// restore sp
@@ -1622,7 +1617,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
ExternalAddress target((address)&DTraceMethodProbes);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lbu(t0, Address(t0, offset));
});
__ bnez(t0, dtrace_method_entry);
@@ -1846,7 +1841,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
ExternalAddress target((address)&DTraceMethodProbes);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lbu(t0, Address(t0, offset));
});
__ bnez(t0, dtrace_method_exit);
@@ -1979,12 +1974,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#ifndef PRODUCT
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
RuntimeAddress target(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
@@ -2156,12 +2146,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mv(xcpool, Deoptimization::Unpack_reexecute);
__ mv(c_rarg0, xthread);
__ orrw(c_rarg2, zr, xcpool); // exec mode
RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ bind(retaddr);
oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
@@ -2253,12 +2238,7 @@ void SharedRuntime::generate_deopt_blob() {
#endif // ASSERT
__ mv(c_rarg0, xthread);
__ mv(c_rarg1, xcpool);
RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
__ bind(retaddr);
// Need to have an oopmap that tells fetch_unroll_info where to
@@ -2400,12 +2380,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mv(c_rarg0, xthread);
__ mv(c_rarg1, xcpool); // second arg: exec_mode
target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
// Set an oopmap for the call site
// Use the same PC we used for the last java frame
@@ -2495,12 +2470,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ mv(c_rarg0, xthread);
__ mv(c_rarg2, Deoptimization::Unpack_uncommon_trap);
RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ bind(retaddr);
// Set an oopmap for the call site
@@ -2622,12 +2592,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// sp should already be aligned
__ mv(c_rarg0, xthread);
__ mv(c_rarg1, Deoptimization::Unpack_uncommon_trap);
target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
// Set an oopmap for the call site
// Use the same PC we used for the last java frame
@@ -2696,12 +2661,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// Do the call
__ mv(c_rarg0, xthread);
RuntimeAddress target(call_ptr);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(call_ptr);
__ bind(retaddr);
// Set an oopmap for the call site. This oopmap will map all
@@ -2809,12 +2769,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ set_last_Java_frame(sp, noreg, retaddr, t0);
__ mv(c_rarg0, xthread);
RuntimeAddress target(destination);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(destination);
__ bind(retaddr);
}
@@ -2943,13 +2898,7 @@ void OptoRuntime::generate_exception_blob() {
address the_pc = __ pc();
__ set_last_Java_frame(sp, noreg, the_pc, t0);
__ mv(c_rarg0, xthread);
RuntimeAddress target(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);
});
__ rt_call(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
// handle_exception_C is a special VM call which does not require an explicit
// instruction sync afterwards.

View File

@@ -1,7 +1,7 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2471,7 +2471,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lwu(x10, Address(t0, offset));
});
@@ -2682,7 +2682,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lwu(x10, Address(t0, offset));
});
__ beqz(x10, L1);
@@ -2975,7 +2975,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lwu(c_rarg3, Address(t0, offset));
});
__ beqz(c_rarg3, L2);
@@ -3111,7 +3111,7 @@ void TemplateTable::fast_accessfield(TosState state) {
ExternalAddress target((address)JvmtiExport::get_field_access_count_addr());
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la_patchable(t0, target, offset);
__ la(t0, target.target(), offset);
__ lwu(x12, Address(t0, offset));
});
__ beqz(x12, L1);