8294087: RISC-V: RVC: Fix a potential alignment issue and add more alignment assertions for the patchable calls/nops

Reviewed-by: shade, fjiang, fyang
Xiaolin Zheng 2022-09-22 11:43:47 +00:00 committed by Fei Yang
parent 3fa6778ab2
commit a216960d71
9 changed files with 30 additions and 5 deletions

@@ -1349,7 +1349,7 @@ void LIR_Assembler::align_call(LIR_Code code) {
   // With RVC a call instruction may get 2-byte aligned.
   // The address of the call instruction needs to be 4-byte aligned to
   // ensure that it does not span a cache line so that it can be patched.
-  __ align(4);
+  __ align(NativeInstruction::instruction_size);
 }

 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
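
A note on the bound: NativeInstruction::instruction_size is 4 bytes on RISC-V, and a 4-byte word at a 4-byte-aligned address can never straddle a cache line, whereas with RVC the emitter may otherwise leave a call only 2-byte aligned. A standalone C++ sketch of that invariant (hypothetical names; not HotSpot code):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Standalone sketch, not HotSpot code. A 4-byte instruction never
    // straddles a cache line when its address is 4-byte aligned, because
    // 4 divides any power-of-two line size.
    static bool spans_cache_line(uintptr_t pc, size_t insn_size, size_t line_size) {
      return (pc / line_size) != ((pc + insn_size - 1) / line_size);
    }

    int main() {
      const size_t line = 64;                       // assumed cache line size
      assert( spans_cache_line(line - 2, 4, line)); // RVC 2-byte alignment: straddles
      assert(!spans_cache_line(line - 4, 4, line)); // 4-byte alignment: never does
      return 0;
    }
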
@@ -1372,7 +1372,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
-  assert((__ offset() % 4) == 0, "bad alignment");
+  MacroAssembler::assert_alignment(call_pc);
   address stub = __ start_a_stub(call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
@@ -323,7 +323,7 @@ void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
   // first instruction with a jump. For this action to be legal we
   // must ensure that this first instruction is a J, JAL or NOP.
   // Make it a NOP.
-
+  assert_alignment(pc());
   nop();
 }
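
For context: the NOP must be a full 4-byte instruction at an aligned address because NativeJump::patch_verified_entry() later overwrites it with a jump while other threads may be executing the method, so the overwrite has to be a single aligned 4-byte store. A standalone, hypothetical C++ sketch of that kind of patch (JAL encoding per the RISC-V spec; not the HotSpot patching code):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Encode "jal rd, offset" per the RISC-V spec: imm[20|10:1|11|19:12], rd, 0x6f.
    static uint32_t encode_jal(uint32_t rd, int32_t offset) {
      uint32_t imm = (uint32_t)offset;
      return (((imm >> 20) & 0x1)   << 31) |
             (((imm >> 1)  & 0x3ff) << 21) |
             (((imm >> 11) & 0x1)   << 20) |
             (((imm >> 12) & 0xff)  << 12) |
             ((rd & 0x1f) << 7) | 0x6f;
    }

    int main() {
      // The method's first instruction slot, 4-byte aligned, holding a NOP
      // (addi x0, x0, 0 == 0x00000013).
      alignas(4) std::atomic<uint32_t> first_insn{0x00000013};
      // Make the method not entrant: one aligned 4-byte store swaps in a jump.
      first_insn.store(encode_jal(/*rd=*/0, /*offset=*/-2048), std::memory_order_release);
      assert(first_insn.load() == 0x801ff06f);
      return 0;
    }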

@@ -255,7 +255,7 @@ void C2_MacroAssembler::emit_entry_barrier_stub(C2EntryBarrierStub* stub) {
   bind(stub->guard());
   relocate(entry_guard_Relocation::spec());
-  assert(offset() % 4 == 0, "bad alignment");
+  assert_alignment(pc());
   emit_int32(0);   // nmethod guard value
   // make sure the stub with a fixed code size
   if (alignment_bytes == 2) {

@@ -262,7 +262,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
     __ bind(local_guard);
-    assert(__ offset() % 4 == 0, "bad alignment");
+    MacroAssembler::assert_alignment(__ pc());
     __ emit_int32(0);  // nmethod guard value. Skipped over in common case.
     __ bind(skip_barrier);
   } else {

@@ -2828,6 +2828,11 @@ address MacroAssembler::trampoline_call(Address entry) {
   }
   address call_pc = pc();
+#ifdef ASSERT
+  if (entry.rspec().type() != relocInfo::runtime_call_type) {
+    assert_alignment(call_pc);
+  }
+#endif
   relocate(entry.rspec());
   if (!far_branches()) {
     jal(entry.target());

@@ -30,6 +30,7 @@
 #include "asm/assembler.hpp"
 #include "code/vmreg.hpp"
 #include "metaprogramming/enableIf.hpp"
+#include "nativeInst_riscv.hpp"
 #include "oops/compressedOops.hpp"
 #include "utilities/powerOfTwo.hpp"
@@ -49,6 +50,9 @@ class MacroAssembler: public Assembler {
   // Alignment
   int align(int modulus, int extra_offset = 0);
+  static inline void assert_alignment(address pc, int alignment = NativeInstruction::instruction_size) {
+    assert(is_aligned(pc, alignment), "bad alignment");
+  }
   // Stack frame creation/removal
   // Note that SP must be updated to the right place before saving/restoring RA and FP
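
The new helper defaults to NativeInstruction::instruction_size (4 bytes on RISC-V, hence the new nativeInst_riscv.hpp include) and delegates to HotSpot's is_aligned(). A minimal standalone illustration of the same power-of-two alignment check (hypothetical names; not the HotSpot utility itself):

    #include <cassert>
    #include <cstdint>

    // Sketch of the underlying check: an address p is n-aligned iff its low
    // bits are zero; n is assumed to be a power of two, as instruction sizes are.
    static bool is_aligned_to(uintptr_t p, uintptr_t n) {
      return (p & (n - 1)) == 0;
    }

    int main() {
      assert( is_aligned_to(0x1000, 4));
      assert(!is_aligned_to(0x1002, 4)); // 2-byte aligned only (an RVC boundary)
      return 0;
    }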

@@ -265,6 +265,13 @@ void NativeJump::verify() { }
+void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
+  // Patching to not_entrant can happen while activations of the method are
+  // in use. The patching in that instance must happen only when certain
+  // alignment restrictions are true. These guarantees check those
+  // conditions.
+  // Must be 4 bytes aligned
+  MacroAssembler::assert_alignment(verified_entry);
+}
@@ -355,6 +362,8 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, ad
          nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
          "riscv cannot replace non-jump with jump");
+
+  check_verified_entry_alignment(entry, verified_entry);
   // Patch this nmethod atomically.
   if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
     ptrdiff_t offset = dest - verified_entry;
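
Assembler::reachable_from_branch_at() gates on whether a direct branch can reach dest; a JAL immediate is 21 bits signed with 2-byte granularity, i.e. roughly +/-1 MiB. A hedged standalone sketch of such a range check (assumed semantics; not the HotSpot implementation):

    #include <cassert>
    #include <cstdint>

    // JAL encodes a 21-bit signed, 2-byte-granular offset: [-2^20, 2^20 - 2].
    static bool jal_reachable(intptr_t from, intptr_t to) {
      intptr_t offset = to - from;
      return offset >= -(1L << 20) && offset < (1L << 20) && (offset % 2) == 0;
    }

    int main() {
      assert( jal_reachable(0x100000, 0x100000 + 4096));
      assert(!jal_reachable(0x100000, 0x100000 + (2L << 20))); // needs a far branch
      return 0;
    }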

@@ -1318,6 +1318,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   // insert a nop at the start of the prolog so we can patch in a
   // branch if we need to invalidate the method later
+  MacroAssembler::assert_alignment(__ pc());
   __ nop();
   assert_cond(C != NULL);
@@ -1735,6 +1736,10 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
   __ cmp_klass(j_rarg0, t1, t0, skip);
   __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   __ bind(skip);
+
+  // These NOPs are critical so that verified entry point is properly
+  // 4 bytes aligned for patching by NativeJump::patch_verified_entry()
+  __ align(NativeInstruction::instruction_size);
 }
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
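
What the added align() emits: with RVC the unverified entry checks above may end on a 2-byte boundary, so up to 2 bytes of NOP padding are inserted before the verified entry point. A standalone sketch of the padding arithmetic (hypothetical helper; not MacroAssembler::align itself):

    #include <cassert>

    // Bytes needed to round offset up to the next multiple of modulus.
    static int padding_for(int offset, int modulus) {
      return (modulus - (offset % modulus)) % modulus;
    }

    int main() {
      assert(padding_for(0x42, 4) == 2); // RVC left a 2-byte-aligned offset
      assert(padding_for(0x44, 4) == 0); // already 4-byte aligned
      return 0;
    }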

@@ -938,6 +938,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     int vep_offset = ((intptr_t)__ pc()) - start;
     // First instruction must be a nop as it may need to be patched on deoptimisation
+    MacroAssembler::assert_alignment(__ pc());
     __ nop();
     gen_special_dispatch(masm,
                          method,
@@ -1089,6 +1090,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // If we have to make this method not-entrant we'll overwrite its
   // first instruction with a jump.
+  MacroAssembler::assert_alignment(__ pc());
   __ nop();
   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {