8326306: RISC-V: Re-structure MASM calls and jumps

Reviewed-by: fyang, luhenry
This commit is contained in:
Robbin Ehn 2024-05-22 11:47:54 +00:00
parent 8a9d77d58d
commit c3bc23fe48
9 changed files with 267 additions and 266 deletions

@ -656,39 +656,35 @@ public:
#undef INSN
#define INSN(NAME, op) \
void NAME(Register Rd, const int32_t offset) { \
guarantee(is_simm21(offset) && ((offset % 2) == 0), "offset is invalid."); \
unsigned insn = 0; \
patch((address)&insn, 6, 0, op); \
patch_reg((address)&insn, 7, Rd); \
patch((address)&insn, 19, 12, (uint32_t)((offset >> 12) & 0xff)); \
patch((address)&insn, 20, (uint32_t)((offset >> 11) & 0x1)); \
patch((address)&insn, 30, 21, (uint32_t)((offset >> 1) & 0x3ff)); \
patch((address)&insn, 31, (uint32_t)((offset >> 20) & 0x1)); \
emit(insn); \
private:
// All calls and jumps must go via MASM.
// Format J-type
// Emit a raw JAL (J-type) instruction. `offset` is the pc-relative
// displacement: it must fit in a signed 21-bit immediate and be
// 2-byte aligned (checked by the guarantee below).
void _jal(Register Rd, const int32_t offset) {
guarantee(is_simm21(offset) && ((offset % 2) == 0), "offset is invalid.");
unsigned insn = 0;
// opcode bits [6:0] = 0b1101111 (JAL)
patch((address)&insn, 6, 0, 0b1101111);
// rd in bits [11:7]
patch_reg((address)&insn, 7, Rd);
// J-type immediate scatter: imm[19:12], imm[11], imm[10:1], imm[20]
patch((address)&insn, 19, 12, (uint32_t)((offset >> 12) & 0xff));
patch((address)&insn, 20, (uint32_t)((offset >> 11) & 0x1));
patch((address)&insn, 30, 21, (uint32_t)((offset >> 1) & 0x3ff));
patch((address)&insn, 31, (uint32_t)((offset >> 20) & 0x1));
emit(insn);
}
INSN(jal, 0b1101111);
#undef INSN
#define INSN(NAME, op, funct) \
void NAME(Register Rd, Register Rs, const int32_t offset) { \
guarantee(is_simm12(offset), "offset is invalid."); \
unsigned insn = 0; \
patch((address)&insn, 6, 0, op); \
patch_reg((address)&insn, 7, Rd); \
patch((address)&insn, 14, 12, funct); \
patch_reg((address)&insn, 15, Rs); \
int32_t val = offset & 0xfff; \
patch((address)&insn, 31, 20, val); \
emit(insn); \
// Format I-type
// Emit a raw JALR (I-type) instruction: indirect jump through Rs with a
// 12-bit signed displacement (checked by the guarantee below).
void _jalr(Register Rd, Register Rs, const int32_t offset) {
guarantee(is_simm12(offset), "offset is invalid.");
unsigned insn = 0;
// opcode bits [6:0] = 0b1100111 (JALR)
patch((address)&insn, 6, 0, 0b1100111);
// rd in bits [11:7]
patch_reg((address)&insn, 7, Rd);
// funct3 = 0b000
patch((address)&insn, 14, 12, 0b000);
// rs1 in bits [19:15]
patch_reg((address)&insn, 15, Rs);
// imm[11:0] in bits [31:20]
int32_t val = offset & 0xfff;
patch((address)&insn, 31, 20, val);
emit(insn);
}
INSN(_jalr, 0b1100111, 0b000);
#undef INSN
public:
enum barrier {
i = 0b1000, o = 0b0100, r = 0b0010, w = 0b0001,
@ -2294,21 +2290,23 @@ public:
#undef INSN
#define INSN(NAME, funct4, op) \
void NAME(Register Rs1) { \
assert_cond(Rs1 != x0); \
uint16_t insn = 0; \
c_patch((address)&insn, 1, 0, op); \
c_patch_reg((address)&insn, 2, x0); \
c_patch_reg((address)&insn, 7, Rs1); \
c_patch((address)&insn, 15, 12, funct4); \
emit_int16(insn); \
private:
// All calls and jumps must go via MASM.
// Format CR, c.jr/c.jalr
// Note C instruction can't be changed, i.e. relocation patching.
// Emit a compressed CR-format instruction (used for c.jr / c.jalr):
// funct4 = InstructionType goes into bits [15:12], the quadrant/opcode
// FunctionType into bits [1:0], rs2 is fixed to x0 and rs1 is the jump
// target register.
template <uint8_t InstructionType, uint8_t FunctionType>
void c_cr_if(Register Rs1) {
assert_cond(Rs1 != x0); // rs1 == x0 is not a valid encoding for these forms
uint16_t insn = 0;
c_patch((address)&insn, 1, 0, FunctionType);
c_patch_reg((address)&insn, 2, x0);
c_patch_reg((address)&insn, 7, Rs1);
c_patch((address)&insn, 15, 12, InstructionType);
emit_int16(insn);
}
INSN(c_jr, 0b1000, 0b10);
INSN(c_jalr, 0b1001, 0b10);
#undef INSN
// c.jr: compressed indirect jump through Rs1 (funct4 = 0b1000, op = 0b10).
void c_jr(Register Rs1) { c_cr_if<0b1000, 0b10>(Rs1); }
// c.jalr: compressed indirect jump-and-link through Rs1 (funct4 = 0b1001, op = 0b10).
void c_jalr(Register Rs1) { c_cr_if<0b1001, 0b10>(Rs1); }
typedef void (Assembler::* j_c_insn)(address dest);
typedef void (Assembler::* compare_and_branch_c_insn)(Register Rs1, address dest);
@ -2331,35 +2329,36 @@ public:
}
}
#define INSN(NAME, funct3, op) \
void NAME(int32_t offset) { \
assert(is_simm12(offset) && ((offset % 2) == 0), "invalid encoding"); \
uint16_t insn = 0; \
c_patch((address)&insn, 1, 0, op); \
c_patch((address)&insn, 2, 2, (offset & nth_bit(5)) >> 5); \
c_patch((address)&insn, 5, 3, (offset & right_n_bits(4)) >> 1); \
c_patch((address)&insn, 6, 6, (offset & nth_bit(7)) >> 7); \
c_patch((address)&insn, 7, 7, (offset & nth_bit(6)) >> 6); \
c_patch((address)&insn, 8, 8, (offset & nth_bit(10)) >> 10); \
c_patch((address)&insn, 10, 9, (offset & right_n_bits(10)) >> 8); \
c_patch((address)&insn, 11, 11, (offset & nth_bit(4)) >> 4); \
c_patch((address)&insn, 12, 12, (offset & nth_bit(11)) >> 11); \
c_patch((address)&insn, 15, 13, funct3); \
emit_int16(insn); \
} \
void NAME(address dest) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
assert(is_simm12(distance) && ((distance % 2) == 0), "invalid encoding"); \
c_j(distance); \
} \
void NAME(Label &L) { \
wrap_label(L, &Assembler::NAME); \
// Format CJ, c.j (c.jal)
// Note C instruction can't be changed, i.e. relocation patching.
// Emit a c.j (compressed jump, CJ format). `offset` is a pc-relative
// displacement that must fit in a signed 12-bit immediate and be
// 2-byte aligned (checked by the assert below).
void c_j(int32_t offset) {
assert(is_simm12(offset) && ((offset % 2) == 0), "invalid encoding");
uint16_t insn = 0;
// quadrant bits [1:0] = 0b01
c_patch((address)&insn, 1, 0, 0b01);
// CJ immediate scatter; each c_patch places the indicated offset bit(s):
c_patch((address)&insn, 2, 2, (offset & nth_bit(5)) >> 5);      // offset[5]
c_patch((address)&insn, 5, 3, (offset & right_n_bits(4)) >> 1); // offset[3:1]
c_patch((address)&insn, 6, 6, (offset & nth_bit(7)) >> 7);      // offset[7]
c_patch((address)&insn, 7, 7, (offset & nth_bit(6)) >> 6);      // offset[6]
c_patch((address)&insn, 8, 8, (offset & nth_bit(10)) >> 10);    // offset[10]
c_patch((address)&insn, 10, 9, (offset & right_n_bits(10)) >> 8); // offset[9:8]
c_patch((address)&insn, 11, 11, (offset & nth_bit(4)) >> 4);    // offset[4]
c_patch((address)&insn, 12, 12, (offset & nth_bit(11)) >> 11);  // offset[11]
// funct3 bits [15:13] = 0b101 (c.j)
c_patch((address)&insn, 15, 13, 0b101);
emit_int16(insn);
}
INSN(c_j, 0b101, 0b01);
// Emit a c.j to an absolute destination; the destination must already be
// within compressed-jump (simm12, 2-byte aligned) range of the current pc.
void c_j(address dest) {
  assert_cond(dest != nullptr);
  int64_t delta = dest - pc();
  assert(is_simm12(delta) && ((delta % 2) == 0), "invalid encoding");
  c_j((int32_t)delta);
}
#undef INSN
// Emit a c.j to a label, bound or not, via the wrap_label machinery.
void c_j(Label &L) {
wrap_label(L, &Assembler::c_j);
}
public:
#define INSN(NAME, funct3, op) \
void NAME(Register Rs1, int32_t imm) { \
@ -2812,24 +2811,35 @@ public:
// --------------------------
// Unconditional branch instructions
// --------------------------
#define INSN(NAME) \
void NAME(Register Rd, Register Rs, const int32_t offset) { \
/* jalr -> c.jr/c.jalr */ \
if (do_compress() && (offset == 0 && Rs != x0)) { \
if (Rd == x1) { \
c_jalr(Rs); \
return; \
} else if (Rd == x0) { \
c_jr(Rs); \
return; \
} \
} \
_jalr(Rd, Rs, offset); \
protected:
// All calls and jumps must go via MASM.
// jalr with compression: picks c.jalr/c.jr when the encoding allows
// (zero offset, base register != x0, link register x1 or x0), otherwise
// falls back to the full-width _jalr.
void jalr(Register Rd, Register Rs, const int32_t offset) {
  const bool compressible = do_compress() && offset == 0 && Rs != x0;
  if (compressible && Rd == x1) {
    c_jalr(Rs);
  } else if (compressible && Rd == x0) {
    c_jr(Rs);
  } else {
    _jalr(Rd, Rs, offset);
  }
}
INSN(jalr);
// jal with compression: picks c.j when no link register is needed
// (Rd == x0) and the offset fits the CJ format. c.jal (the linking
// variant) exists only in RV32C, so it is never emitted here.
// Fix: removed a stray `#undef INSN` line — diff residue from the old
// macro-based definition — that was left embedded in the function body.
void jal(Register Rd, const int32_t offset) {
  /* jal -> c.j, note c.jal is RV32C only */
  if (do_compress() &&
      Rd == x0 &&
      is_simm12(offset) && ((offset % 2) == 0)) {
    c_j(offset);
    return;
  }
  _jal(Rd, offset);
}
public:
// --------------------------
// Miscellaneous Instructions
@ -3009,18 +3019,6 @@ public:
#undef INSN
// ---------------------------------------------------------------------------------------
#define INSN(NAME, REGISTER) \
void NAME(Register Rs) { \
jalr(REGISTER, Rs, 0); \
}
INSN(jr, x0);
INSN(jalr, x1);
#undef INSN
// -------------- ZCB Instruction Definitions --------------
// Zcb additional C instructions
private:

@ -1841,17 +1841,7 @@ void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, C
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
assert(!tmp->is_valid(), "don't need temporary");
CodeBlob *cb = CodeCache::find_blob(dest);
if (cb != nullptr) {
__ far_call(RuntimeAddress(dest));
} else {
RuntimeAddress target(dest);
__ relocate(target.rspec(), [&] {
int32_t offset;
__ movptr(t0, target.target(), offset);
__ jalr(x1, t0, offset);
});
}
__ rt_call(dest);
if (info != nullptr) {
add_call_info_here(info);

@ -300,7 +300,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
assert(!is_narrow, "phantom access cannot be narrow");
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
}
__ call(target);
__ rt_call(target);
__ mv(t0, x10);
__ pop_call_clobbered_registers();
__ mv(x10, t0);
@ -703,7 +703,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s
assert(is_native, "phantom must only be called off-heap");
target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
}
__ call(target);
__ rt_call(target);
__ mv(t0, x10);
__ pop_call_clobbered_registers();
__ mv(x10, t0);

@ -177,7 +177,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
__ jalr(x1, t0, offset);
__ jalr(t0, offset);
});
__ leave();
__ ret();

@ -339,7 +339,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
relocate(target.rspec(), [&] {
int32_t offset;
la(t0, target.target(), offset);
jalr(x0, t0, offset);
jr(t0, offset);
});
bind(ok);
}
@ -641,14 +641,16 @@ void MacroAssembler::emit_static_call_stub() {
// Jump to the entry point of the c2i stub.
int32_t offset = 0;
movptr(t0, 0, offset);
jalr(x0, t0, offset);
jr(t0, offset);
}
void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
int32_t offset = 0;
push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp
call(entry_point);
mv(t0, entry_point, offset);
jalr(t0, offset);
if (retaddr != nullptr) {
bind(*retaddr);
}
@ -716,33 +718,19 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
}
void MacroAssembler::la(Register Rd, const address addr) {
int64_t offset = addr - pc();
if (is_valid_32bit_offset(offset)) {
auipc(Rd, (int32_t)offset + 0x800); //0x800, Note:the 11th sign bit
addi(Rd, Rd, ((int64_t)offset << 52) >> 52);
} else {
movptr(Rd, addr);
}
int32_t offset;
la(Rd, addr, offset);
addi(Rd, Rd, offset);
}
void MacroAssembler::la(Register Rd, const address addr, int32_t &offset) {
assert((uintptr_t)addr < (1ull << 48), "bad address");
unsigned long target_address = (uintptr_t)addr;
unsigned long low_address = (uintptr_t)CodeCache::low_bound();
unsigned long high_address = (uintptr_t)CodeCache::high_bound();
long offset_low = target_address - low_address;
long offset_high = target_address - high_address;
// RISC-V doesn't compute a page-aligned address, in order to partially
// compensate for the use of *signed* offsets in its base+disp12
// addressing mode (RISC-V's PC-relative reach remains asymmetric
// [-(2G + 2K), 2G - 2K).
if (offset_high >= -((1L << 31) + (1L << 11)) && offset_low < (1L << 31) - (1L << 11)) {
if (is_32bit_offset_from_codecache((int64_t)addr)) {
int64_t distance = addr - pc();
assert(is_valid_32bit_offset(distance), "Must be");
auipc(Rd, (int32_t)distance + 0x800);
offset = ((int32_t)distance << 20) >> 20;
} else {
assert(!CodeCache::contains(addr), "Must be");
movptr(Rd, addr, offset);
}
}
@ -859,88 +847,113 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
}
}
#define INSN(NAME, REGISTER) \
void MacroAssembler::NAME(const address dest, Register temp) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
if (is_simm21(distance) && ((distance % 2) == 0)) { \
Assembler::jal(REGISTER, distance); \
} else { \
assert(temp != noreg, "expecting a register"); \
int32_t offset = 0; \
movptr(temp, dest, offset); \
Assembler::jalr(REGISTER, temp, offset); \
} \
} \
INSN(j, x0);
INSN(jal, x1);
#undef INSN
#define INSN(NAME, REGISTER) \
void MacroAssembler::NAME(const Address &adr, Register temp) { \
switch (adr.getMode()) { \
case Address::literal: { \
relocate(adr.rspec(), [&] { \
NAME(adr.target(), temp); \
}); \
break; \
} \
case Address::base_plus_offset: { \
int32_t offset = ((int32_t)adr.offset() << 20) >> 20; \
la(temp, Address(adr.base(), adr.offset() - offset)); \
Assembler::jalr(REGISTER, temp, offset); \
break; \
} \
default: \
ShouldNotReachHere(); \
} \
}
INSN(j, x0);
INSN(jal, x1);
#undef INSN
#define INSN(NAME) \
void MacroAssembler::NAME(Register Rd, const address dest, Register temp) { \
assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \
if (is_simm21(distance) && ((distance % 2) == 0)) { \
Assembler::NAME(Rd, distance); \
} else { \
assert_different_registers(Rd, temp); \
int32_t offset = 0; \
movptr(temp, dest, offset); \
jalr(Rd, temp, offset); \
} \
} \
void MacroAssembler::NAME(Register Rd, Label &L, Register temp) { \
assert_different_registers(Rd, temp); \
wrap_label(Rd, L, temp, &MacroAssembler::NAME); \
}
INSN(jal);
#undef INSN
#define INSN(NAME, REGISTER) \
void MacroAssembler::NAME(Label &l, Register temp) { \
jal(REGISTER, l, temp); \
} \
INSN(j, x0);
INSN(jal, x1);
#undef INSN
void MacroAssembler::wrap_label(Register Rt, Label &L, Register tmp, load_insn_by_temp insn) {
if (L.is_bound()) {
(this->*insn)(Rt, target(L), tmp);
// Emit a call (jump-and-link into x1/ra) to an absolute destination.
// Uses a single jal when the destination is within simm21 reach of pc;
// otherwise materializes the address into `temp` with la and links
// through jalr.
// Fix: removed two stray lines (`L.add_patch_at(...)` and
// `(this->*insn)(Rt, pc(), tmp)`) — residue from the deleted wrap_label
// overload — that referenced undeclared names and broke the build.
void MacroAssembler::jump_link(const address dest, Register temp) {
  assert_cond(dest != nullptr);
  int64_t distance = dest - pc();
  if (is_simm21(distance) && ((distance % 2) == 0)) {
    Assembler::jal(x1, distance);
  } else {
    assert(temp != noreg && temp != x0, "expecting a register");
    int32_t offset = 0;
    la(temp, dest, offset);
    jalr(temp, offset);
  }
}
void MacroAssembler::jump_link(const Address &adr, Register temp) {
switch (adr.getMode()) {
case Address::literal: {
relocate(adr.rspec(), [&] {
jump_link(adr.target(), temp);
});
break;
}
case Address::base_plus_offset: {
int32_t offset = ((int32_t)adr.offset() << 20) >> 20;
la(temp, Address(adr.base(), adr.offset() - offset));
jalr(temp, offset);
break;
}
default:
ShouldNotReachHere();
}
}
void MacroAssembler::j(const address dest, Register temp) {
assert(CodeCache::contains(dest), "Must be");
assert_cond(dest != nullptr);
int64_t distance = dest - pc();
// We can't patch C, i.e. if Label wasn't bound we need to patch this jump.
IncompressibleRegion ir(this);
if (is_simm21(distance) && ((distance % 2) == 0)) {
Assembler::jal(x0, distance);
} else {
assert(temp != noreg && temp != x0, "expecting a register");
int32_t offset = 0;
la(temp, dest, offset);
jr(temp, offset);
}
}
void MacroAssembler::j(const Address &adr, Register temp) {
switch (adr.getMode()) {
case Address::literal: {
relocate(adr.rspec(), [&] {
j(adr.target(), temp);
});
break;
}
case Address::base_plus_offset: {
int32_t offset = ((int32_t)adr.offset() << 20) >> 20;
la(temp, Address(adr.base(), adr.offset() - offset));
jr(temp, offset);
break;
}
default:
ShouldNotReachHere();
}
}
// Unconditional jump to a label. An unbound label records a patch site
// and emits a placeholder jump to the current pc.
void MacroAssembler::j(Label &lab, Register temp) {
  assert_different_registers(x0, temp);
  if (!lab.is_bound()) {
    lab.add_patch_at(code(), locator());
    MacroAssembler::j(pc(), temp);
    return;
  }
  MacroAssembler::j(target(lab), temp);
}
// Indirect jump (no link): jalr with x0 as the destination register,
// through base register Rd with a 12-bit displacement.
void MacroAssembler::jr(Register Rd, int32_t offset) {
assert(Rd != noreg, "expecting a register");
Assembler::jalr(x0, Rd, offset);
}
void MacroAssembler::call(const address dest, Register temp) {
assert_cond(dest != nullptr);
assert(temp != noreg, "expecting a register");
int32_t offset = 0;
la(temp, dest, offset);
jalr(temp, offset);
}
// Indirect call: jalr linking into x1 (ra), through base register Rs
// with a 12-bit displacement.
void MacroAssembler::jalr(Register Rs, int32_t offset) {
assert(Rs != noreg, "expecting a register");
Assembler::jalr(x1, Rs, offset);
}
// Emit a runtime call. Destinations inside the code cache use a far_call;
// anything else materializes the full address in tmp (relocated) and
// links through jalr. Only tmp is clobbered beyond the call itself.
void MacroAssembler::rt_call(address dest, Register tmp) {
  RuntimeAddress target(dest);
  if (CodeCache::find_blob(dest) == nullptr) {
    relocate(target.rspec(), [&] {
      int32_t lo;
      la(tmp, target.target(), lo);
      jalr(tmp, lo);
    });
  } else {
    far_call(target, tmp);
  }
}
@ -3169,7 +3182,6 @@ void MacroAssembler::atomic_cas(
}
void MacroAssembler::far_jump(const Address &entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
@ -3179,12 +3191,11 @@ void MacroAssembler::far_jump(const Address &entry, Register tmp) {
relocate(entry.rspec(), [&] {
int32_t offset;
la(tmp, entry.target(), offset);
jalr(x0, tmp, offset);
jr(tmp, offset);
});
}
void MacroAssembler::far_call(const Address &entry, Register tmp) {
assert(ReservedCodeCacheSize < 4*G, "branch out of range");
assert(CodeCache::find_blob(entry.target()) != nullptr,
"destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type
@ -3194,9 +3205,8 @@ void MacroAssembler::far_call(const Address &entry, Register tmp) {
// We can use auipc + jalr here because we know that the total size of
// the code cache cannot exceed 2Gb.
relocate(entry.rspec(), [&] {
int32_t offset;
la(tmp, entry.target(), offset);
jalr(x1, tmp, offset); // link
assert(is_valid_32bit_offset(entry.target() - pc()), "Far call using wrong instructions.");
call(entry.target(), tmp);
});
}
@ -3452,7 +3462,7 @@ void MacroAssembler::reserved_stack_check() {
relocate(target.rspec(), [&] {
int32_t offset;
movptr(t0, target.target(), offset);
jalr(x0, t0, offset);
jr(t0, offset);
});
should_not_reach_here();
@ -3534,7 +3544,7 @@ address MacroAssembler::trampoline_call(Address entry) {
}
#endif
relocate(entry.rspec(), [&] {
jal(target);
jump_link(target, t0);
});
postcond(pc() != badAddress);
@ -4373,7 +4383,7 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
return nullptr;
}
} else {
jal(zero_blocks);
jump_link(zero_blocks, t0);
}
}
bind(around);
@ -5018,20 +5028,6 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
}
}
void MacroAssembler::rt_call(address dest, Register tmp) {
CodeBlob *cb = CodeCache::find_blob(dest);
RuntimeAddress target(dest);
if (cb) {
far_call(target, tmp);
} else {
relocate(target.rspec(), [&] {
int32_t offset;
movptr(tmp, target.target(), offset);
jalr(x1, tmp, offset);
});
}
}
void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) {
assert(bit_pos < 64, "invalid bit range");
if (UseZbs) {

@ -592,14 +592,40 @@ class MacroAssembler: public Assembler {
void bltz(Register Rs, const address dest);
void bgtz(Register Rs, const address dest);
void j(Label &l, Register temp = t0);
private:
void jump_link(const address dest, Register temp);
void jump_link(const Address &adr, Register temp);
public:
// We try to follow risc-v asm menomics.
// But as we don't layout a reachable GOT,
// we often need to resort to movptr, li <48imm>.
// https://github.com/riscv-non-isa/riscv-asm-manual/blob/master/riscv-asm.md
// jump: jal x0, offset
// For long reach uses temp register for:
// la + jr
void j(const address dest, Register temp = t0);
void j(const Address &adr, Register temp = t0);
void jal(Label &l, Register temp = t0);
void jal(const address dest, Register temp = t0);
void jal(const Address &adr, Register temp = t0);
void jal(Register Rd, Label &L, Register temp = t0);
void jal(Register Rd, const address dest, Register temp = t0);
void j(Label &l, Register temp = t0);
// jump register: jalr x0, offset(rs)
void jr(Register Rd, int32_t offset = 0);
// call: la + jalr x1
void call(const address dest, Register temp = t0);
// jalr: jalr x1, offset(rs)
void jalr(Register Rs, int32_t offset = 0);
// Emit a runtime call. Only invalidates the tmp register which
// is used to keep the entry address for jalr/movptr.
// Uses call() for intra code cache, else movptr + jalr.
void rt_call(address dest, Register tmp = t0);
// ret: jalr x0, 0(x1)
// Return: jalr with x0 as destination through x1 (ra), offset 0.
inline void ret() {
Assembler::jalr(x0, x1, 0);
}
//label
void beqz(Register Rs, Label &l, bool is_far = false);
@ -689,6 +715,14 @@ private:
return x < (twoG - twoK) && x >= (-twoG - twoK);
}
// Ensure that the auipc can reach the destination at x from anywhere within
// the code cache so that if it is relocated we know it will still reach.
// True if x is auipc-reachable from both ends of the code cache, i.e.
// the pc-relative offset stays valid no matter where within the code
// cache the referring instruction ends up after relocation.
bool is_32bit_offset_from_codecache(int64_t x) {
int64_t low = (int64_t)CodeCache::low_bound();
int64_t high = (int64_t)CodeCache::high_bound();
return is_valid_32bit_offset(x - low) && is_valid_32bit_offset(x - high);
}
public:
void push_reg(Register Rs);
void pop_reg(Register Rd);
@ -733,14 +767,13 @@ public:
typedef void (MacroAssembler::* compare_and_branch_insn)(Register Rs1, Register Rs2, const address dest);
typedef void (MacroAssembler::* compare_and_branch_label_insn)(Register Rs1, Register Rs2, Label &L, bool is_far);
typedef void (MacroAssembler::* jal_jalr_insn)(Register Rt, address dest);
typedef void (MacroAssembler::* load_insn_by_temp)(Register Rt, address dest, Register temp);
void wrap_label(Register r, Label &L, Register t, load_insn_by_temp insn);
void wrap_label(Register r, Label &L, jal_jalr_insn insn);
void wrap_label(Register r1, Register r2, Label &L,
compare_and_branch_insn insn,
compare_and_branch_label_insn neg_insn, bool is_far = false);
// la will use movptr instead of GOT when not in reach for auipc.
void la(Register Rd, Label &label);
void la(Register Rd, const address addr);
void la(Register Rd, const address addr, int32_t &offset);
@ -1469,21 +1502,6 @@ public:
VMRegPair dst,
bool is_receiver,
int* receiver_offset);
// Emit a runtime call. Only invalidates the tmp register which
// is used to keep the entry address for jalr/movptr.
void rt_call(address dest, Register tmp = t0);
void call(const address dest, Register temp = t0) {
assert_cond(dest != nullptr);
assert(temp != noreg, "expecting a register");
int32_t offset = 0;
mv(temp, dest, offset);
jalr(x1, temp, offset);
}
inline void ret() {
jalr(x0, x1, 0);
}
#ifdef ASSERT
// Template short-hand support to clean-up after a failed call to trampoline

@ -400,7 +400,7 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
int32_t offset = 0;
a.movptr(t0, entry, offset); // lui, addi, slli, addi, slli
a.jalr(x0, t0, offset); // jalr
a.jr(t0, offset); // jalr
ICache::invalidate_range(code_pos, instruction_size);
}
@ -410,7 +410,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
ShouldNotCallThis();
}
// Return the call target stored in the trampoline's data slot.
// The nmethod argument is not used by this implementation.
address NativeCallTrampolineStub::destination(nmethod *nm) const {
return ptr_at(data_offset);
}

@ -652,7 +652,7 @@ class StubGenerator: public StubCodeGenerator {
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
BLOCK_COMMENT("call MacroAssembler::debug");
__ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
__ rt_call(CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
__ ebreak();
return start;
@ -5450,7 +5450,7 @@ static const int64_t right_3_bits = right_n_bits(3);
}
__ mv(c_rarg0, xthread);
BLOCK_COMMENT("call runtime_entry");
__ call(runtime_entry);
__ rt_call(runtime_entry);
// Generate oop map
OopMap* map = new OopMap(framesize, 0);

@ -1206,7 +1206,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// hand.
//
__ mv(c_rarg0, xthread);
__ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
__ rt_call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
__ get_method(xmethod);
__ reinit_heapbase();
__ bind(Continue);
@ -1255,7 +1255,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ push_call_clobbered_registers();
__ mv(c_rarg0, xthread);
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
__ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
__ pop_call_clobbered_registers();
__ bind(no_reguard);
}
@ -1815,7 +1815,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
// the tosca in-state for the given template.
assert(Interpreter::trace_code(t->tos_in()) != nullptr, "entry must have been generated");
__ jal(Interpreter::trace_code(t->tos_in()));
__ call(Interpreter::trace_code(t->tos_in()));
__ reinit_heapbase();
}