8332900: RISC-V: refactor nativeInst_riscv.cpp and macroAssembler_riscv.cpp

Reviewed-by: fyang, luhenry
This commit is contained in:
Hamlin Li 2024-06-04 07:04:57 +00:00
parent 67d6f3ca9e
commit 454660d361
6 changed files with 434 additions and 421 deletions

@ -629,7 +629,7 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
case ZBarrierRelocationFormatMarkBadMask:
case ZBarrierRelocationFormatStoreGoodBits:
case ZBarrierRelocationFormatStoreBadMask:
assert(NativeInstruction::is_li16u_at(addr), "invalide zgc barrier");
assert(MacroAssembler::is_li16u_at(addr), "invalide zgc barrier");
bytes = MacroAssembler::pd_patch_instruction_size(addr, (address)(uintptr_t)value);
break;
default:

@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
@ -65,6 +64,138 @@
#define STOP(str) stop(str);
#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
// Field extraction helpers: decode individual operand/opcode fields of the
// 32-bit RISC-V instruction located at 'instr'.

Register MacroAssembler::extract_rs1(address instr) {
  assert_cond(instr != nullptr);
  uint32_t insn = Assembler::ld_instr(instr);
  return as_Register(Assembler::extract(insn, 19, 15)); // rs1: bits [19:15]
}

Register MacroAssembler::extract_rs2(address instr) {
  assert_cond(instr != nullptr);
  uint32_t insn = Assembler::ld_instr(instr);
  return as_Register(Assembler::extract(insn, 24, 20)); // rs2: bits [24:20]
}

Register MacroAssembler::extract_rd(address instr) {
  assert_cond(instr != nullptr);
  uint32_t insn = Assembler::ld_instr(instr);
  return as_Register(Assembler::extract(insn, 11, 7)); // rd: bits [11:7]
}

uint32_t MacroAssembler::extract_opcode(address instr) {
  assert_cond(instr != nullptr);
  return Assembler::extract(Assembler::ld_instr(instr), 6, 0); // opcode: bits [6:0]
}

uint32_t MacroAssembler::extract_funct3(address instr) {
  assert_cond(instr != nullptr);
  return Assembler::extract(Assembler::ld_instr(instr), 14, 12); // funct3: bits [14:12]
}
bool MacroAssembler::is_pc_relative_at(address instr) {
  // A pc-relative sequence is an auipc followed by one of
  //   jalr / addi / integer load / float load,
  // where the second instruction consumes auipc's destination register.
  if (!is_auipc_at(instr)) {
    return false;
  }
  address second = instr + instruction_size;
  bool tail_ok = is_addi_at(second) ||
                 is_jalr_at(second) ||
                 is_load_at(second) ||
                 is_float_load_at(second);
  return tail_ok && check_pc_relative_data_dependency(instr);
}
// ie:ld(Rd, Label)
bool MacroAssembler::is_load_pc_relative_at(address instr) {
  // auipc + ld, where the ld reads and writes auipc's destination register.
  if (!is_auipc_at(instr)) {
    return false;
  }
  return is_ld_at(instr + instruction_size) &&
         check_load_pc_relative_data_dependency(instr);
}
bool MacroAssembler::is_movptr1_at(address instr) {
  // movptr1 shape: lui, addi, slli(11), addi, slli(6), then addi/jalr/load.
  address i1 = instr + instruction_size;
  address i2 = instr + instruction_size * 2;
  address i3 = instr + instruction_size * 3;
  address i4 = instr + instruction_size * 4;
  address i5 = instr + instruction_size * 5;
  return is_lui_at(instr) &&                                      // Lui
         is_addi_at(i1) &&                                        // Addi
         is_slli_shift_at(i2, 11) &&                              // Slli Rd, Rs, 11
         is_addi_at(i3) &&                                        // Addi
         is_slli_shift_at(i4, 6) &&                               // Slli Rd, Rs, 6
         (is_addi_at(i5) || is_jalr_at(i5) || is_load_at(i5)) &&  // Addi/Jalr/Load
         check_movptr1_data_dependency(instr);
}
bool MacroAssembler::is_movptr2_at(address instr) {
  // movptr2 shape: lui, lui, slli(18), add, then addi/jalr/load.
  address i1 = instr + instruction_size;
  address i2 = instr + instruction_size * 2;
  address i3 = instr + instruction_size * 3;
  address i4 = instr + instruction_size * 4;
  return is_lui_at(instr) &&                                      // lui
         is_lui_at(i1) &&                                         // lui
         is_slli_shift_at(i2, 18) &&                              // slli Rd, Rs, 18
         is_add_at(i3) &&                                         // add
         (is_addi_at(i4) || is_jalr_at(i4) || is_load_at(i4)) &&  // Addi/Jalr/Load
         check_movptr2_data_dependency(instr);
}
bool MacroAssembler::is_li16u_at(address instr) {
  // li16u shape: lui followed by srli on the same register.
  if (!is_lui_at(instr)) {
    return false;
  }
  return is_srli_at(instr + instruction_size) &&
         check_li16u_data_dependency(instr);
}
bool MacroAssembler::is_li32_at(address instr) {
  // li32 shape: lui followed by addiw on the same register.
  if (!is_lui_at(instr)) {
    return false;
  }
  return is_addiw_at(instr + instruction_size) &&
         check_li32_data_dependency(instr);
}
bool MacroAssembler::is_li64_at(address instr) {
  // li64 shape: lui, then alternating addi / slli (shifts 12, 12, 8),
  // ending in a final addi.
  address i = instr;
  bool shape_ok = is_lui_at(i) &&                                // lui
                  is_addi_at(i + instruction_size) &&            // addi
                  is_slli_shift_at(i + instruction_size * 2, 12) &&  // slli Rd, Rs, 12
                  is_addi_at(i + instruction_size * 3) &&        // addi
                  is_slli_shift_at(i + instruction_size * 4, 12) &&  // slli Rd, Rs, 12
                  is_addi_at(i + instruction_size * 5) &&        // addi
                  is_slli_shift_at(i + instruction_size * 6, 8) &&   // slli Rd, Rs, 8
                  is_addi_at(i + instruction_size * 7);          // addi
  return shape_ok && check_li64_data_dependency(instr);
}
bool MacroAssembler::is_lwu_to_zr(address instr) {
  // lwu: opcode LOAD (0b0000011) with funct3 0b110; the destination
  // register must be zr.
  assert_cond(instr != nullptr);
  bool is_lwu = extract_opcode(instr) == 0b0000011 &&
                extract_funct3(instr) == 0b110;
  return is_lwu && extract_rd(instr) == zr;
}
uint32_t MacroAssembler::get_membar_kind(address addr) {
  // Decode the fence's pred (bits [27:24]) and succ (bits [23:20]) fields
  // and translate them back into a membar order-constraint mask.
  assert_cond(addr != nullptr);
  assert(is_membar(addr), "no membar found");
  uint32_t insn = Bytes::get_native_u4(addr);
  uint32_t pred = Assembler::extract(insn, 27, 24);
  uint32_t succ = Assembler::extract(insn, 23, 20);
  return MacroAssembler::pred_succ_to_membar_mask(pred, succ);
}
void MacroAssembler::set_membar_kind(address addr, uint32_t order_kind) {
  // Rewrite the pred/succ fields of an existing fence instruction in place
  // so that it expresses 'order_kind'.
  assert_cond(addr != nullptr);
  assert(is_membar(addr), "no membar found");
  uint32_t pred = 0;
  uint32_t succ = 0;
  MacroAssembler::membar_mask_to_pred_succ(order_kind, pred, succ);
  // Patch a local copy of the instruction, then store it back atomically
  // via sd_instr.
  uint32_t insn = Bytes::get_native_u4(addr);
  address pInsn = (address) &insn;
  Assembler::patch(pInsn, 27, 24, pred);  // pred field
  Assembler::patch(pInsn, 23, 20, succ);  // succ field
  Assembler::sd_instr(addr, insn);
}
static void pass_arg0(MacroAssembler* masm, Register arg) {
if (c_rarg0 != arg) {
masm->mv(c_rarg0, arg);
@ -1405,7 +1536,7 @@ static int patch_offset_in_jal(address branch, int64_t offset) {
Assembler::patch(branch, 30, 21, (offset >> 1) & 0x3ff); // offset[10:1] ==> branch[30:21]
Assembler::patch(branch, 20, 20, (offset >> 11) & 0x1); // offset[11] ==> branch[20]
Assembler::patch(branch, 19, 12, (offset >> 12) & 0xff); // offset[19:12] ==> branch[19:12]
return NativeInstruction::instruction_size; // only one instruction
return MacroAssembler::instruction_size; // only one instruction
}
static int patch_offset_in_conditional_branch(address branch, int64_t offset) {
@ -1415,14 +1546,14 @@ static int patch_offset_in_conditional_branch(address branch, int64_t offset) {
Assembler::patch(branch, 30, 25, (offset >> 5) & 0x3f); // offset[10:5] ==> branch[30:25]
Assembler::patch(branch, 7, 7, (offset >> 11) & 0x1); // offset[11] ==> branch[7]
Assembler::patch(branch, 11, 8, (offset >> 1) & 0xf); // offset[4:1] ==> branch[11:8]
return NativeInstruction::instruction_size; // only one instruction
return MacroAssembler::instruction_size; // only one instruction
}
static int patch_offset_in_pc_relative(address branch, int64_t offset) {
const int PC_RELATIVE_INSTRUCTION_NUM = 2; // auipc, addi/jalr/load
Assembler::patch(branch, 31, 12, ((offset + 0x800) >> 12) & 0xfffff); // Auipc. offset[31:12] ==> branch[31:12]
Assembler::patch(branch + 4, 31, 20, offset & 0xfff); // Addi/Jalr/Load. offset[11:0] ==> branch[31:20]
return PC_RELATIVE_INSTRUCTION_NUM * NativeInstruction::instruction_size;
return PC_RELATIVE_INSTRUCTION_NUM * MacroAssembler::instruction_size;
}
static int patch_addr_in_movptr1(address branch, address target) {
@ -1432,7 +1563,7 @@ static int patch_addr_in_movptr1(address branch, address target) {
Assembler::patch(branch + 4, 31, 20, (lower >> 17) & 0xfff); // Addi. target[28:17] ==> branch[31:20]
Assembler::patch(branch + 12, 31, 20, (lower >> 6) & 0x7ff); // Addi. target[16: 6] ==> branch[31:20]
Assembler::patch(branch + 20, 31, 20, lower & 0x3f); // Addi/Jalr/Load. target[ 5: 0] ==> branch[31:20]
return NativeMovConstReg::movptr1_instruction_size;
return MacroAssembler::movptr1_instruction_size;
}
static int patch_addr_in_movptr2(address instruction_address, address target) {
@ -1444,15 +1575,15 @@ static int patch_addr_in_movptr2(address instruction_address, address target) {
int low12 = (lower30 << 20) >> 20;
int mid18 = ((lower30 - low12) >> 12);
Assembler::patch(instruction_address + (NativeInstruction::instruction_size * 0), 31, 12, (upper18 & 0xfffff)); // Lui
Assembler::patch(instruction_address + (NativeInstruction::instruction_size * 1), 31, 12, (mid18 & 0xfffff)); // Lui
Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 0), 31, 12, (upper18 & 0xfffff)); // Lui
Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 1), 31, 12, (mid18 & 0xfffff)); // Lui
// Slli
// Add
Assembler::patch(instruction_address + (NativeInstruction::instruction_size * 4), 31, 20, low12 & 0xfff); // Addi/Jalr/Load
Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 4), 31, 20, low12 & 0xfff); // Addi/Jalr/Load
assert(MacroAssembler::target_addr_for_insn(instruction_address) == target, "Must be");
return NativeMovConstReg::movptr2_instruction_size;
return MacroAssembler::movptr2_instruction_size;
}
static int patch_imm_in_li64(address branch, address target) {
@ -1473,12 +1604,12 @@ static int patch_imm_in_li64(address branch, address target) {
Assembler::patch(branch + 12, 31, 20, ((int32_t)lower >> 20) & 0xfff); // Addi.
Assembler::patch(branch + 20, 31, 20, (((intptr_t)target << 44) >> 52) & 0xfff); // Addi.
Assembler::patch(branch + 28, 31, 20, (intptr_t)target & 0xff); // Addi.
return LI64_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
return LI64_INSTRUCTIONS_NUM * MacroAssembler::instruction_size;
}
static int patch_imm_in_li16u(address branch, uint16_t target) {
Assembler::patch(branch, 31, 12, target); // patch lui only
return NativeInstruction::instruction_size;
return MacroAssembler::instruction_size;
}
int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
@ -1489,7 +1620,7 @@ int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
upper = (int32_t)upper;
Assembler::patch(branch + 0, 31, 12, (upper >> 12) & 0xfffff); // Lui.
Assembler::patch(branch + 4, 31, 20, lower & 0xfff); // Addiw.
return LI32_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
return LI32_INSTRUCTIONS_NUM * MacroAssembler::instruction_size;
}
static long get_offset_of_jal(address insn_addr) {
@ -1537,11 +1668,11 @@ static address get_target_of_movptr1(address insn_addr) {
static address get_target_of_movptr2(address insn_addr) {
assert_cond(insn_addr != nullptr);
int32_t upper18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + NativeInstruction::instruction_size * 0), 31, 12)) & 0xfffff); // Lui
int32_t mid18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + NativeInstruction::instruction_size * 1), 31, 12)) & 0xfffff); // Lui
int32_t upper18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 0), 31, 12)) & 0xfffff); // Lui
int32_t mid18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 1), 31, 12)) & 0xfffff); // Lui
// 2 // Slli
// 3 // Add
int32_t low12 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + NativeInstruction::instruction_size * 4), 31, 20))); // Addi/Jalr/Load.
int32_t low12 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 4), 31, 20))); // Addi/Jalr/Load.
address ret = (address)(((intptr_t)upper18<<30ll) + ((intptr_t)mid18<<12ll) + low12);
return ret;
}
@ -1568,22 +1699,22 @@ address MacroAssembler::get_target_of_li32(address insn_addr) {
int MacroAssembler::pd_patch_instruction_size(address instruction_address, address target) {
assert_cond(instruction_address != nullptr);
int64_t offset = target - instruction_address;
if (NativeInstruction::is_jal_at(instruction_address)) { // jal
if (MacroAssembler::is_jal_at(instruction_address)) { // jal
return patch_offset_in_jal(instruction_address, offset);
} else if (NativeInstruction::is_branch_at(instruction_address)) { // beq/bge/bgeu/blt/bltu/bne
} else if (MacroAssembler::is_branch_at(instruction_address)) { // beq/bge/bgeu/blt/bltu/bne
return patch_offset_in_conditional_branch(instruction_address, offset);
} else if (NativeInstruction::is_pc_relative_at(instruction_address)) { // auipc, addi/jalr/load
} else if (MacroAssembler::is_pc_relative_at(instruction_address)) { // auipc, addi/jalr/load
return patch_offset_in_pc_relative(instruction_address, offset);
} else if (NativeInstruction::is_movptr1_at(instruction_address)) { // movptr1
} else if (MacroAssembler::is_movptr1_at(instruction_address)) { // movptr1
return patch_addr_in_movptr1(instruction_address, target);
} else if (NativeInstruction::is_movptr2_at(instruction_address)) { // movptr2
} else if (MacroAssembler::is_movptr2_at(instruction_address)) { // movptr2
return patch_addr_in_movptr2(instruction_address, target);
} else if (NativeInstruction::is_li64_at(instruction_address)) { // li64
} else if (MacroAssembler::is_li64_at(instruction_address)) { // li64
return patch_imm_in_li64(instruction_address, target);
} else if (NativeInstruction::is_li32_at(instruction_address)) { // li32
} else if (MacroAssembler::is_li32_at(instruction_address)) { // li32
int64_t imm = (intptr_t)target;
return patch_imm_in_li32(instruction_address, (int32_t)imm);
} else if (NativeInstruction::is_li16u_at(instruction_address)) {
} else if (MacroAssembler::is_li16u_at(instruction_address)) {
int64_t imm = (intptr_t)target;
return patch_imm_in_li16u(instruction_address, (uint16_t)imm);
} else {
@ -1600,19 +1731,19 @@ int MacroAssembler::pd_patch_instruction_size(address instruction_address, addre
address MacroAssembler::target_addr_for_insn(address insn_addr) {
long offset = 0;
assert_cond(insn_addr != nullptr);
if (NativeInstruction::is_jal_at(insn_addr)) { // jal
if (MacroAssembler::is_jal_at(insn_addr)) { // jal
offset = get_offset_of_jal(insn_addr);
} else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
} else if (MacroAssembler::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
offset = get_offset_of_conditional_branch(insn_addr);
} else if (NativeInstruction::is_pc_relative_at(insn_addr)) { // auipc, addi/jalr/load
} else if (MacroAssembler::is_pc_relative_at(insn_addr)) { // auipc, addi/jalr/load
offset = get_offset_of_pc_relative(insn_addr);
} else if (NativeInstruction::is_movptr1_at(insn_addr)) { // movptr1
} else if (MacroAssembler::is_movptr1_at(insn_addr)) { // movptr1
return get_target_of_movptr1(insn_addr);
} else if (NativeInstruction::is_movptr2_at(insn_addr)) { // movptr2
} else if (MacroAssembler::is_movptr2_at(insn_addr)) { // movptr2
return get_target_of_movptr2(insn_addr);
} else if (NativeInstruction::is_li64_at(insn_addr)) { // li64
} else if (MacroAssembler::is_li64_at(insn_addr)) { // li64
return get_target_of_li64(insn_addr);
} else if (NativeInstruction::is_li32_at(insn_addr)) { // li32
} else if (MacroAssembler::is_li32_at(insn_addr)) { // li32
return get_target_of_li32(insn_addr);
} else {
ShouldNotReachHere();
@ -1624,14 +1755,14 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
// OOPs are either narrow (32 bits) or wide (48 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
if (NativeInstruction::is_li32_at(insn_addr)) {
if (MacroAssembler::is_li32_at(insn_addr)) {
// Move narrow OOP
uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
return patch_imm_in_li32(insn_addr, (int32_t)n);
} else if (NativeInstruction::is_movptr1_at(insn_addr)) {
} else if (MacroAssembler::is_movptr1_at(insn_addr)) {
// Move wide OOP
return patch_addr_in_movptr1(insn_addr, o);
} else if (NativeInstruction::is_movptr2_at(insn_addr)) {
} else if (MacroAssembler::is_movptr2_at(insn_addr)) {
// Move wide OOP
return patch_addr_in_movptr2(insn_addr, o);
}
@ -2779,14 +2910,13 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
}
void MacroAssembler::membar(uint32_t order_constraint) {
address prev = pc() - NativeMembar::instruction_size;
address prev = pc() - MacroAssembler::instruction_size;
address last = code()->last_insn();
if (last != nullptr && nativeInstruction_at(last)->is_membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
if (last != nullptr && is_membar(last) && prev == last) {
// We are merging two memory barrier instructions. On RISCV we
// can do this simply by ORing them together.
bar->set_kind(bar->get_kind() | order_constraint);
set_membar_kind(prev, get_membar_kind(prev) | order_constraint);
BLOCK_COMMENT("merged membar");
} else {
code()->set_last_insn(pc());
@ -3653,7 +3783,7 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
int MacroAssembler::ic_check_size() {
// No compressed
return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
return (MacroAssembler::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
far_branch_size();
}
@ -3720,7 +3850,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
// instructions code-section.
// Make sure the address of destination 8-byte aligned after 3 instructions.
align(wordSize, NativeCallTrampolineStub::data_offset);
align(wordSize, MacroAssembler::trampoline_stub_data_offset);
RelocationHolder rh = trampoline_stub_Relocation::spec(code()->insts()->start() +
insts_call_instruction_offset);
@ -3733,7 +3863,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
ld(t0, target); // auipc + ld
jr(t0); // jalr
bind(target);
assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
assert(offset() - stub_start_offset == MacroAssembler::trampoline_stub_data_offset,
"should be");
assert(offset() % wordSize == 0, "bad alignment");
emit_int64((int64_t)dest);
@ -3741,7 +3871,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
const address stub_start_addr = addr_at(stub_start_offset);
assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
assert(MacroAssembler::is_trampoline_stub_at(stub_start_addr), "doesn't look like a trampoline");
end_a_stub();
return stub_start_addr;
@ -3749,12 +3879,12 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
int MacroAssembler::max_trampoline_stub_size() {
// Max stub size: alignment nop, TrampolineStub.
return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
return MacroAssembler::instruction_size + MacroAssembler::trampoline_stub_instruction_size;
}
int MacroAssembler::static_call_stub_size() {
// (lui, addi, slli, addi, slli, addi) + (lui + lui + slli + add) + jalr
return 11 * NativeInstruction::instruction_size;
return 11 * MacroAssembler::instruction_size;
}
Address MacroAssembler::add_memory_helper(const Address dst, Register tmp) {

@ -30,7 +30,6 @@
#include "asm/assembler.inline.hpp"
#include "code/vmreg.hpp"
#include "metaprogramming/enableIf.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/powerOfTwo.hpp"
@ -42,6 +41,7 @@
class MacroAssembler: public Assembler {
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
@ -49,7 +49,7 @@ class MacroAssembler: public Assembler {
// Alignment
int align(int modulus, int extra_offset = 0);
static inline void assert_alignment(address pc, int alignment = NativeInstruction::instruction_size) {
static inline void assert_alignment(address pc, int alignment = MacroAssembler::instruction_size) {
assert(is_aligned(pc, alignment), "bad alignment");
}
@ -1232,7 +1232,7 @@ public:
address ic_call(address entry, jint method_index = 0);
static int ic_check_size();
int ic_check(int end_alignment = NativeInstruction::instruction_size);
int ic_check(int end_alignment = MacroAssembler::instruction_size);
// Support for memory inc/dec
// n.b. increment/decrement calls with an Address destination will
@ -1542,6 +1542,226 @@ private:
public:
void lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
public:
enum {
// Byte sizes / data offsets of the multi-instruction code patterns emitted
// by this macro assembler; used when emitting, verifying, and patching them.
// Refer to function emit_trampoline_stub.
trampoline_stub_instruction_size = 3 * instruction_size + wordSize, // auipc + ld + jr + target address
trampoline_stub_data_offset = 3 * instruction_size, // auipc + ld + jr
// movptr
movptr1_instruction_size = 6 * instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = 5 * instruction_size, // lui, lui, slli, add, addi. See movptr2().
// pc-relative load. See is_load_pc_relative_at().
load_pc_relative_instruction_size = 2 * instruction_size // auipc, ld
};
static bool is_load_pc_relative_at(address branch);
static bool is_li16u_at(address instr);
static bool is_trampoline_stub_at(address addr) {
  // Ensure that the stub is exactly
  //   ld t0, L--->auipc + ld
  //   jr t0
  //   L: <target address>
  // Checks performed (instruction + register + immediate):
  //   1) the instructions are auipc + ld + jalr
  //   2) auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
  //   3) the offset in ld[31:20] equals the data_offset
  assert_cond(addr != nullptr);
  const int instr_size = instruction_size;
  return is_auipc_at(addr) &&
         is_ld_at(addr + instr_size) &&
         is_jalr_at(addr + 2 * instr_size) &&
         (extract_rd(addr) == x5) &&
         (extract_rd(addr + instr_size) == x5) &&
         (extract_rs1(addr + instr_size) == x5) &&
         (extract_rs1(addr + 2 * instr_size) == x5) &&
         // Address the ld with the named instr_size constant (not a bare
         // byte literal) and check its imm[11:0] against the data offset.
         (Assembler::extract(Assembler::ld_instr(addr + instr_size), 31, 20) == trampoline_stub_data_offset);
}
static bool is_call_at(address instr) {
  // A call is either a direct jal or an indirect jalr.
  return is_jal_at(instr) || is_jalr_at(instr);
}
// Single-instruction predicates: each checks the opcode (and, where needed,
// funct3 / destination register) of the 32-bit instruction at 'instr'.
static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
// Covers all conditional branches: beq/bge/bgeu/blt/bltu/bne.
static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
static bool is_add_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110011 && extract_funct3(instr) == 0b000; }
static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_to_zr_at(address instr){ assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
static bool is_srli_at(address instr) {
  // srli: opcode OP-IMM (0b0010011), funct3 0b101, and the top six bits of
  // the immediate field zero (0b000000 distinguishes srli from srai).
  assert_cond(instr != nullptr);
  return extract_opcode(instr) == 0b0010011 &&
         extract_funct3(instr) == 0b101 &&
         // Load the instruction via Assembler::ld_instr, like every sibling
         // predicate, instead of dereferencing the raw pointer directly.
         Assembler::extract(Assembler::ld_instr(instr), 31, 26) == 0b000000;
}
static bool is_slli_shift_at(address instr, uint32_t shift) {
  // slli with a specific shift amount: opcode OP-IMM, funct3 0b001, and the
  // shamt field (bits [25:20]) equal to 'shift'.
  assert_cond(instr != nullptr);
  uint32_t insn = Assembler::ld_instr(instr);
  return extract_opcode(instr) == 0b0010011 &&       // opcode field
         extract_funct3(instr) == 0b001 &&           // funct3 selects the operation
         Assembler::extract(insn, 25, 20) == shift;  // shamt field
}
static bool is_movptr1_at(address instr);
static bool is_movptr2_at(address instr);
static bool is_lwu_to_zr(address instr);
private:
static Register extract_rs1(address instr);
static Register extract_rs2(address instr);
static Register extract_rd(address instr);
static uint32_t extract_opcode(address instr);
static uint32_t extract_funct3(address instr);
// the instruction sequence of movptr is as below:
// lui
// addi
// slli
// addi
// slli
// addi/jalr/load
static bool check_movptr1_data_dependency(address instr) {
  // movptr1 threads one register through lui, addi, slli, addi, slli and a
  // final addi/jalr/load: each of the four middle instructions must both
  // read and write the lui destination, and the last must read it.
  Register reg = extract_rd(instr); // rd of the leading lui
  address insn = instr;
  for (int i = 0; i < 4; i++) {
    insn += instruction_size;
    if (extract_rs1(insn) != reg || extract_rd(insn) != reg) {
      return false;
    }
  }
  insn += instruction_size; // the trailing addi/jalr/load
  return extract_rs1(insn) == reg;
}
// the instruction sequence of movptr2 is as below:
// lui
// lui
// slli
// add
// addi/jalr/load
static bool check_movptr2_data_dependency(address instr) {
  // movptr2 uses two registers: lui1's rd is shifted in place by the slli,
  // then added into lui2's rd by the add; the final addi/jalr/load reads
  // the add's destination.
  address lui1 = instr;
  address lui2 = lui1 + instruction_size;
  address slli = lui2 + instruction_size;
  address add  = slli + instruction_size;
  address last = add  + instruction_size;
  Register lo = extract_rd(lui1);
  Register hi = extract_rd(lui2);
  return extract_rs1(slli) == lo &&
         extract_rd(slli) == lo &&
         extract_rd(add) == hi &&
         extract_rs1(add) == hi &&
         extract_rs2(add) == extract_rd(slli) &&
         extract_rs1(last) == extract_rd(add);
}
// the instruction sequence of li64 is as below:
// lui
// addi
// slli
// addi
// slli
// addi
// slli
// addi
static bool check_li64_data_dependency(address instr) {
  // li64 threads one register through lui and seven following addi/slli
  // instructions: every follower must both read and write the lui's rd.
  Register reg = extract_rd(instr); // rd of the leading lui
  address insn = instr;
  for (int i = 0; i < 7; i++) {
    insn += instruction_size;
    if (extract_rs1(insn) != reg || extract_rd(insn) != reg) {
      return false;
    }
  }
  return true;
}
// the instruction sequence of li16u is as below:
// lui
// srli
static bool check_li16u_data_dependency(address instr) {
  // li16u: the srli must read and write the lui's destination register.
  address srli = instr + instruction_size;
  Register reg = extract_rd(instr);
  return extract_rs1(srli) == reg &&
         extract_rd(srli) == reg;
}
// the instruction sequence of li32 is as below:
// lui
// addiw
static bool check_li32_data_dependency(address instr) {
  // li32: the addiw must read and write the lui's destination register.
  address addiw = instr + instruction_size;
  Register reg = extract_rd(instr);
  return extract_rs1(addiw) == reg &&
         extract_rd(addiw) == reg;
}
// the instruction sequence of pc-relative is as below:
// auipc
// jalr/addi/load/float_load
static bool check_pc_relative_data_dependency(address instr) {
  // auipc; jalr/addi/load/float_load — the second instruction must consume
  // the auipc's destination register.
  address follower = instr + instruction_size;
  return extract_rs1(follower) == extract_rd(instr);
}
// the instruction sequence of load_label is as below:
// auipc
// load
static bool check_load_pc_relative_data_dependency(address instr) {
  // auipc; load — the load must index through the auipc's destination
  // register and write its result back into that same register.
  address load = instr + instruction_size;
  Register reg = extract_rd(instr);
  return extract_rd(load) == reg &&
         extract_rs1(load) == reg;
}
static bool is_li32_at(address instr);
static bool is_li64_at(address instr);
static bool is_pc_relative_at(address branch);
static bool is_membar(address addr) {
  // fence: opcode MISC-MEM (0b0001111) with funct3 0b000.
  uint32_t insn = Bytes::get_native_u4(addr);
  return (insn & 0x7f) == 0b1111 && extract_funct3(addr) == 0;
}
static uint32_t get_membar_kind(address addr);
static void set_membar_kind(address addr, uint32_t order_kind);
};
#ifdef ASSERT

@ -39,112 +39,20 @@
#include "c1/c1_Runtime1.hpp"
#endif
// Field extraction helpers (NativeInstruction variants, superseded by the
// identical MacroAssembler:: versions in this refactor): decode individual
// fields of the 32-bit RISC-V instruction at 'instr'.
Register NativeInstruction::extract_rs1(address instr) {
assert_cond(instr != nullptr);
// rs1 occupies bits [19:15].
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 19, 15));
}
Register NativeInstruction::extract_rs2(address instr) {
assert_cond(instr != nullptr);
// rs2 occupies bits [24:20].
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 24, 20));
}
Register NativeInstruction::extract_rd(address instr) {
assert_cond(instr != nullptr);
// rd occupies bits [11:7].
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 11, 7));
}
uint32_t NativeInstruction::extract_opcode(address instr) {
assert_cond(instr != nullptr);
// The major opcode occupies bits [6:0].
return Assembler::extract(Assembler::ld_instr(instr), 6, 0);
}
uint32_t NativeInstruction::extract_funct3(address instr) {
assert_cond(instr != nullptr);
// funct3 occupies bits [14:12].
return Assembler::extract(Assembler::ld_instr(instr), 14, 12);
}
// Sequence predicates (NativeInstruction variants, superseded by the
// identical MacroAssembler:: versions in this refactor). Each recognizes a
// multi-instruction code pattern emitted by the macro assembler.
bool NativeInstruction::is_pc_relative_at(address instr) {
// auipc + jalr
// auipc + addi
// auipc + load
// auipc + fload_load
return (is_auipc_at(instr)) &&
(is_addi_at(instr + instruction_size) ||
is_jalr_at(instr + instruction_size) ||
is_load_at(instr + instruction_size) ||
is_float_load_at(instr + instruction_size)) &&
check_pc_relative_data_dependency(instr);
}
// ie:ld(Rd, Label)
bool NativeInstruction::is_load_pc_relative_at(address instr) {
return is_auipc_at(instr) && // auipc
is_ld_at(instr + instruction_size) && // ld
check_load_pc_relative_data_dependency(instr);
}
// movptr1: lui, addi, slli(11), addi, slli(6), then addi/jalr/load.
bool NativeInstruction::is_movptr1_at(address instr) {
return is_lui_at(instr) && // Lui
is_addi_at(instr + instruction_size) && // Addi
is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
is_addi_at(instr + instruction_size * 3) && // Addi
is_slli_shift_at(instr + instruction_size * 4, 6) && // Slli Rd, Rs, 6
(is_addi_at(instr + instruction_size * 5) ||
is_jalr_at(instr + instruction_size * 5) ||
is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load
check_movptr1_data_dependency(instr);
}
// movptr2: lui, lui, slli(18), add, then addi/jalr/load.
bool NativeInstruction::is_movptr2_at(address instr) {
return is_lui_at(instr) && // lui
is_lui_at(instr + instruction_size) && // lui
is_slli_shift_at(instr + instruction_size * 2, 18) && // slli Rd, Rs, 18
is_add_at(instr + instruction_size * 3) &&
(is_addi_at(instr + instruction_size * 4) ||
is_jalr_at(instr + instruction_size * 4) ||
is_load_at(instr + instruction_size * 4)) && // Addi/Jalr/Load
check_movptr2_data_dependency(instr);
}
// li16u: lui followed by srli on the same register.
bool NativeInstruction::is_li16u_at(address instr) {
return is_lui_at(instr) && // lui
is_srli_at(instr + instruction_size) && // srli
check_li16u_data_dependency(instr);
}
// li32: lui followed by addiw on the same register.
bool NativeInstruction::is_li32_at(address instr) {
return is_lui_at(instr) && // lui
is_addiw_at(instr + instruction_size) && // addiw
check_li32_data_dependency(instr);
}
// li64: lui, then alternating addi / slli (shifts 12, 12, 8), ending in addi.
bool NativeInstruction::is_li64_at(address instr) {
return is_lui_at(instr) && // lui
is_addi_at(instr + instruction_size) && // addi
is_slli_shift_at(instr + instruction_size * 2, 12) && // Slli Rd, Rs, 12
is_addi_at(instr + instruction_size * 3) && // addi
is_slli_shift_at(instr + instruction_size * 4, 12) && // Slli Rd, Rs, 12
is_addi_at(instr + instruction_size * 5) && // addi
is_slli_shift_at(instr + instruction_size * 6, 8) && // Slli Rd, Rs, 8
is_addi_at(instr + instruction_size * 7) && // addi
check_li64_data_dependency(instr);
}
void NativeCall::verify() {
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
assert(MacroAssembler::is_call_at((address)this), "unexpected code at call site");
}
address NativeCall::destination() const {
address addr = (address)this;
assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
assert(MacroAssembler::is_jal_at(instruction_address()), "inst must be jal.");
address destination = MacroAssembler::target_addr_for_insn(instruction_address());
// Do we use a trampoline stub for this call?
CodeBlob* cb = CodeCache::find_blob(addr);
assert(cb && cb->is_nmethod(), "sanity");
nmethod *nm = (nmethod *)cb;
if (nm != nullptr && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
if (nm != nullptr && nm->stub_contains(destination) && MacroAssembler::is_trampoline_stub_at(destination)) {
// Yes we do, so get the destination from the trampoline stub.
const address trampoline_stub_addr = destination;
destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
@ -168,12 +76,12 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
"concurrent code patching");
address addr_call = addr_at(0);
assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");
assert(MacroAssembler::is_call_at(addr_call), "unexpected code at call site");
// Patch the constant in the call's trampoline stub.
address trampoline_stub_addr = get_trampoline();
if (trampoline_stub_addr != nullptr) {
assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
assert (!MacroAssembler::is_trampoline_stub_at(dest), "chained trampolines");
nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
}
@ -195,7 +103,7 @@ address NativeCall::get_trampoline() {
assert(code != nullptr, "Could not find the containing code blob");
address jal_destination = MacroAssembler::pd_call_destination(call_addr);
if (code != nullptr && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
if (code != nullptr && code->contains(jal_destination) && MacroAssembler::is_trampoline_stub_at(jal_destination)) {
return jal_destination;
}
@ -341,14 +249,7 @@ address NativeGeneralJump::jump_destination() const {
//-------------------------------------------------------------------
bool NativeInstruction::is_safepoint_poll() {
return is_lwu_to_zr(address(this));
}
bool NativeInstruction::is_lwu_to_zr(address instr) {
assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0000011 &&
extract_funct3(instr) == 0b110 &&
extract_rd(instr) == zr); // zr
return MacroAssembler::is_lwu_to_zr(address(this));
}
// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
@ -435,30 +336,6 @@ void NativeCallTrampolineStub::set_destination(address new_destination) {
OrderAccess::release();
}
uint32_t NativeMembar::get_kind() {
uint32_t insn = uint_at(0);
uint32_t predecessor = Assembler::extract(insn, 27, 24);
uint32_t successor = Assembler::extract(insn, 23, 20);
return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
}
void NativeMembar::set_kind(uint32_t order_kind) {
uint32_t predecessor = 0;
uint32_t successor = 0;
MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);
uint32_t insn = uint_at(0);
address pInsn = (address) &insn;
Assembler::patch(pInsn, 27, 24, predecessor);
Assembler::patch(pInsn, 23, 20, successor);
address membar = addr_at(0);
Assembler::sd_instr(membar, insn);
}
void NativePostCallNop::make_deopt() {
MacroAssembler::assert_alignment(addr_at(0));
NativeDeoptInstruction::insert(addr_at(0));
@ -481,7 +358,7 @@ bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
}
int32_t data = (oopmap_slot << 24) | cb_offset;
assert(data != 0, "must be");
assert(is_lui_to_zr_at(addr_at(4)) && is_addiw_to_zr_at(addr_at(8)), "must be");
assert(MacroAssembler::is_lui_to_zr_at(addr_at(4)) && MacroAssembler::is_addiw_to_zr_at(addr_at(8)), "must be");
MacroAssembler::patch_imm_in_li32(addr_at(4), data);
return true; // successfully encoded

@ -27,6 +27,7 @@
#ifndef CPU_RISCV_NATIVEINST_RISCV_HPP
#define CPU_RISCV_NATIVEINST_RISCV_HPP
#include "macroAssembler_riscv.hpp"
#include "asm/assembler.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
@ -52,198 +53,24 @@ class NativeCall;
class NativeInstruction {
friend class Relocation;
friend bool is_NativeCallTrampolineStub_at(address);
public:
enum {
instruction_size = 4,
compressed_instruction_size = 2,
instruction_size = MacroAssembler::instruction_size,
compressed_instruction_size = MacroAssembler::compressed_instruction_size,
};
juint encoding() const {
return uint_at(0);
}
bool is_jal() const { return is_jal_at(addr_at(0)); }
bool is_movptr() const { return is_movptr1_at(addr_at(0)) ||
is_movptr2_at(addr_at(0)); }
bool is_movptr1() const { return is_movptr1_at(addr_at(0)); }
bool is_movptr2() const { return is_movptr2_at(addr_at(0)); }
bool is_auipc() const { return is_auipc_at(addr_at(0)); }
bool is_call() const { return is_call_at(addr_at(0)); }
bool is_jump() const { return is_jump_at(addr_at(0)); }
static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
static bool is_add_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110011 && extract_funct3(instr) == 0b000; }
static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
static bool is_addiw_to_zr_at(address instr){ assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
static bool is_srli_at(address instr) {
assert_cond(instr != nullptr);
return extract_opcode(instr) == 0b0010011 &&
extract_funct3(instr) == 0b101 &&
Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000;
}
static bool is_slli_shift_at(address instr, uint32_t shift) {
assert_cond(instr != nullptr);
return (extract_opcode(instr) == 0b0010011 && // opcode field
extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
Assembler::extract(Assembler::ld_instr(instr), 25, 20) == shift); // shamt field
}
static Register extract_rs1(address instr);
static Register extract_rs2(address instr);
static Register extract_rd(address instr);
static uint32_t extract_opcode(address instr);
static uint32_t extract_funct3(address instr);
// the instruction sequence of movptr is as below:
// lui
// addi
// slli
// addi
// slli
// addi/jalr/load
static bool check_movptr1_data_dependency(address instr) {
address lui = instr;
address addi1 = lui + instruction_size;
address slli1 = addi1 + instruction_size;
address addi2 = slli1 + instruction_size;
address slli2 = addi2 + instruction_size;
address last_instr = slli2 + instruction_size;
return extract_rs1(addi1) == extract_rd(lui) &&
extract_rs1(addi1) == extract_rd(addi1) &&
extract_rs1(slli1) == extract_rd(addi1) &&
extract_rs1(slli1) == extract_rd(slli1) &&
extract_rs1(addi2) == extract_rd(slli1) &&
extract_rs1(addi2) == extract_rd(addi2) &&
extract_rs1(slli2) == extract_rd(addi2) &&
extract_rs1(slli2) == extract_rd(slli2) &&
extract_rs1(last_instr) == extract_rd(slli2);
}
// the instruction sequence of movptr2 is as below:
// lui
// lui
// slli
// add
// addi/jalr/load
static bool check_movptr2_data_dependency(address instr) {
address lui1 = instr;
address lui2 = lui1 + instruction_size;
address slli = lui2 + instruction_size;
address add = slli + instruction_size;
address last_instr = add + instruction_size;
return extract_rd(add) == extract_rd(lui2) &&
extract_rs1(add) == extract_rd(lui2) &&
extract_rs2(add) == extract_rd(slli) &&
extract_rs1(slli) == extract_rd(lui1) &&
extract_rd(slli) == extract_rd(lui1) &&
extract_rs1(last_instr) == extract_rd(add);
}
// the instruction sequence of li64 is as below:
// lui
// addi
// slli
// addi
// slli
// addi
// slli
// addi
static bool check_li64_data_dependency(address instr) {
address lui = instr;
address addi1 = lui + instruction_size;
address slli1 = addi1 + instruction_size;
address addi2 = slli1 + instruction_size;
address slli2 = addi2 + instruction_size;
address addi3 = slli2 + instruction_size;
address slli3 = addi3 + instruction_size;
address addi4 = slli3 + instruction_size;
return extract_rs1(addi1) == extract_rd(lui) &&
extract_rs1(addi1) == extract_rd(addi1) &&
extract_rs1(slli1) == extract_rd(addi1) &&
extract_rs1(slli1) == extract_rd(slli1) &&
extract_rs1(addi2) == extract_rd(slli1) &&
extract_rs1(addi2) == extract_rd(addi2) &&
extract_rs1(slli2) == extract_rd(addi2) &&
extract_rs1(slli2) == extract_rd(slli2) &&
extract_rs1(addi3) == extract_rd(slli2) &&
extract_rs1(addi3) == extract_rd(addi3) &&
extract_rs1(slli3) == extract_rd(addi3) &&
extract_rs1(slli3) == extract_rd(slli3) &&
extract_rs1(addi4) == extract_rd(slli3) &&
extract_rs1(addi4) == extract_rd(addi4);
}
// the instruction sequence of li16u is as below:
// lui
// srli
static bool check_li16u_data_dependency(address instr) {
address lui = instr;
address srli = lui + instruction_size;
return extract_rs1(srli) == extract_rd(lui) &&
extract_rs1(srli) == extract_rd(srli);
}
// the instruction sequence of li32 is as below:
// lui
// addiw
static bool check_li32_data_dependency(address instr) {
address lui = instr;
address addiw = lui + instruction_size;
return extract_rs1(addiw) == extract_rd(lui) &&
extract_rs1(addiw) == extract_rd(addiw);
}
// the instruction sequence of pc-relative is as below:
// auipc
// jalr/addi/load/float_load
static bool check_pc_relative_data_dependency(address instr) {
address auipc = instr;
address last_instr = auipc + instruction_size;
return extract_rs1(last_instr) == extract_rd(auipc);
}
// the instruction sequence of load_label is as below:
// auipc
// load
static bool check_load_pc_relative_data_dependency(address instr) {
address auipc = instr;
address load = auipc + instruction_size;
return extract_rd(load) == extract_rd(auipc) &&
extract_rs1(load) == extract_rd(load);
}
static bool is_movptr1_at(address instr);
static bool is_movptr2_at(address instr);
static bool is_li16u_at(address instr);
static bool is_li32_at(address instr);
static bool is_li64_at(address instr);
static bool is_pc_relative_at(address branch);
static bool is_load_pc_relative_at(address branch);
static bool is_call_at(address instr) {
if (is_jal_at(instr) || is_jalr_at(instr)) {
return true;
}
return false;
}
static bool is_lwu_to_zr(address instr);
bool is_jal() const { return MacroAssembler::is_jal_at(addr_at(0)); }
bool is_movptr() const { return MacroAssembler::is_movptr1_at(addr_at(0)) ||
MacroAssembler::is_movptr2_at(addr_at(0)); }
bool is_movptr1() const { return MacroAssembler::is_movptr1_at(addr_at(0)); }
bool is_movptr2() const { return MacroAssembler::is_movptr2_at(addr_at(0)); }
bool is_auipc() const { return MacroAssembler::is_auipc_at(addr_at(0)); }
bool is_call() const { return MacroAssembler::is_call_at(addr_at(0)); }
bool is_jump() const { return MacroAssembler::is_jump_at(addr_at(0)); }
inline bool is_nop() const;
inline bool is_jump_or_nop();
@ -272,11 +99,7 @@ class NativeInstruction {
inline friend NativeInstruction* nativeInstruction_at(address addr);
static bool maybe_cpool_ref(address instr) {
return is_auipc_at(instr);
}
bool is_membar() {
return (uint_at(0) & 0x7f) == 0b1111 && extract_funct3(addr_at(0)) == 0;
return MacroAssembler::is_auipc_at(instr);
}
};
@ -332,7 +155,7 @@ class NativeCall: public NativeInstruction {
inline friend NativeCall* nativeCall_before(address return_address);
static bool is_call_before(address return_address) {
return is_call_at(return_address - NativeCall::return_address_offset);
return MacroAssembler::is_call_at(return_address - NativeCall::return_address_offset);
}
// MT-safe patching of a call instruction.
@ -377,9 +200,9 @@ inline NativeCall* nativeCall_before(address return_address) {
class NativeMovConstReg: public NativeInstruction {
public:
enum RISCV_specific_constants {
movptr1_instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = 5 * NativeInstruction::instruction_size, // lui, lui, slli, add, addi. See movptr2().
load_pc_relative_instruction_size = 2 * NativeInstruction::instruction_size // auipc, ld
movptr1_instruction_size = MacroAssembler::movptr1_instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = MacroAssembler::movptr2_instruction_size, // lui, lui, slli, add, addi. See movptr2().
load_pc_relative_instruction_size = MacroAssembler::load_pc_relative_instruction_size // auipc, ld
};
address instruction_address() const { return addr_at(0); }
@ -389,23 +212,23 @@ class NativeMovConstReg: public NativeInstruction {
// and the next instruction address should be addr_at(6 * instruction_size).
// However, when the instruction at 5 * instruction_size isn't addi,
// the next instruction address should be addr_at(5 * instruction_size)
if (is_movptr1_at(instruction_address())) {
if (is_addi_at(addr_at(movptr1_instruction_size - NativeInstruction::instruction_size))) {
if (MacroAssembler::is_movptr1_at(instruction_address())) {
if (MacroAssembler::is_addi_at(addr_at(movptr1_instruction_size - NativeInstruction::instruction_size))) {
// Assume: lui, addi, slli, addi, slli, addi
return addr_at(movptr1_instruction_size);
} else {
// Assume: lui, addi, slli, addi, slli
return addr_at(movptr1_instruction_size - NativeInstruction::instruction_size);
}
} else if (is_movptr2_at(instruction_address())) {
if (is_addi_at(addr_at(movptr2_instruction_size - NativeInstruction::instruction_size))) {
} else if (MacroAssembler::is_movptr2_at(instruction_address())) {
if (MacroAssembler::is_addi_at(addr_at(movptr2_instruction_size - NativeInstruction::instruction_size))) {
// Assume: lui, lui, slli, add, addi
return addr_at(movptr2_instruction_size);
} else {
// Assume: lui, lui, slli, add
return addr_at(movptr2_instruction_size - NativeInstruction::instruction_size);
}
} else if (is_load_pc_relative_at(instruction_address())) {
} else if (MacroAssembler::is_load_pc_relative_at(instruction_address())) {
// Assume: auipc, ld
return addr_at(load_pc_relative_instruction_size);
}
@ -548,8 +371,8 @@ class NativeCallTrampolineStub : public NativeInstruction {
enum RISCV_specific_constants {
// Refer to function emit_trampoline_stub.
instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr
instruction_size = MacroAssembler::trampoline_stub_instruction_size, // auipc + ld + jr + target address
data_offset = MacroAssembler::trampoline_stub_data_offset, // auipc + ld + jr
};
address destination(nmethod *nm = nullptr) const;
@ -557,49 +380,12 @@ class NativeCallTrampolineStub : public NativeInstruction {
ptrdiff_t destination_offset() const;
};
inline bool is_NativeCallTrampolineStub_at(address addr) {
// Ensure that the stub is exactly
// ld t0, L--->auipc + ld
// jr t0
// L:
// judge inst + register + imm
// 1). check the instructions: auipc + ld + jalr
// 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
// 3). check if the offset in ld[31:20] equals the data_offset
assert_cond(addr != nullptr);
const int instr_size = NativeInstruction::instruction_size;
if (NativeInstruction::is_auipc_at(addr) &&
NativeInstruction::is_ld_at(addr + instr_size) &&
NativeInstruction::is_jalr_at(addr + 2 * instr_size) &&
(NativeInstruction::extract_rd(addr) == x5) &&
(NativeInstruction::extract_rd(addr + instr_size) == x5) &&
(NativeInstruction::extract_rs1(addr + instr_size) == x5) &&
(NativeInstruction::extract_rs1(addr + 2 * instr_size) == x5) &&
(Assembler::extract(Assembler::ld_instr(addr + 4), 31, 20) == NativeCallTrampolineStub::data_offset)) {
return true;
}
return false;
}
inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
assert_cond(addr != nullptr);
assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
assert(MacroAssembler::is_trampoline_stub_at(addr), "no call trampoline found");
return (NativeCallTrampolineStub*)addr;
}
class NativeMembar : public NativeInstruction {
public:
uint32_t get_kind();
void set_kind(uint32_t order_kind);
};
inline NativeMembar *NativeMembar_at(address addr) {
assert_cond(addr != nullptr);
assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
return (NativeMembar*)addr;
}
// A NativePostCallNop takes the form of three instructions:
// nop; lui zr, hi20; addiw zr, lo12
//
@ -613,7 +399,7 @@ public:
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// an addiw as well.
return is_nop() && is_lui_to_zr_at(addr_at(4));
return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(4));
}
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const;
bool patch(int32_t oopmap_slot, int32_t cb_offset);

@ -42,7 +42,7 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
case relocInfo::oop_type: {
oop_Relocation *reloc = (oop_Relocation *)this;
// in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() isn't null
if (NativeInstruction::is_load_pc_relative_at(addr())) {
if (MacroAssembler::is_load_pc_relative_at(addr())) {
address constptr = (address)code()->oop_addr_at(reloc->oop_index());
bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr);
assert((address)Bytes::get_native_u8(constptr) == x, "error in oop relocation");
@ -60,7 +60,7 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
address Relocation::pd_call_destination(address orig_addr) {
assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) {
if (MacroAssembler::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != nullptr) {
return nativeCallTrampolineStub_at(trampoline)->destination();
@ -81,7 +81,7 @@ address Relocation::pd_call_destination(address orig_addr) {
void Relocation::pd_set_call_destination(address x) {
assert(is_call(), "should be an address instruction here");
if (NativeCall::is_call_at(addr())) {
if (MacroAssembler::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline != nullptr) {
nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
@ -94,7 +94,7 @@ void Relocation::pd_set_call_destination(address x) {
}
address* Relocation::pd_address_in_code() {
assert(NativeCall::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!");
assert(MacroAssembler::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!");
return (address*)(MacroAssembler::target_addr_for_insn(addr()));
}