8293524: RISC-V: Use macro-assembler functions as appropriate

Reviewed-by: shade, fjiang
Author: Fei Yang
Date: 2022-09-09 00:18:48 +00:00
parent 14eb5ad0dc
commit 43e191d64b
13 changed files with 78 additions and 78 deletions

@ -95,14 +95,14 @@ void LIR_Assembler::arraycopy_simple_check(Register src, Register src_pos, Regis
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
__ li(t1, Klass::_lh_neutral_value);
__ mv(t1, Klass::_lh_neutral_value);
__ bge(t0, t1, *stub->entry(), /* is_far */ true);
}
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ lw(t0, Address(tmp, in_bytes(Klass::layout_helper_offset())));
__ li(t1, Klass::_lh_neutral_value);
__ mv(t1, Klass::_lh_neutral_value);
__ bge(t0, t1, *stub->entry(), /* is_far */ true);
}
}

@ -1069,7 +1069,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfil
__ ld(t1, recv_addr);
__ bnez(t1, next_test);
__ sd(recv, recv_addr);
__ li(t1, DataLayout::counter_increment);
__ mv(t1, DataLayout::counter_increment);
__ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
__ j(*update_done);
__ bind(next_test);
@ -1634,7 +1634,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
if (TypeEntries::is_type_none(current_klass)) {
__ beqz(t1, none);
__ li(t0, (u1)TypeEntries::null_seen);
__ mv(t0, (u1)TypeEntries::null_seen);
__ beq(t0, t1, none);
// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
@ -1684,7 +1684,7 @@ void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_kla
Label ok;
__ ld(t0, mdo_addr);
__ beqz(t0, ok);
__ li(t1, (u1)TypeEntries::null_seen);
__ mv(t1, (u1)TypeEntries::null_seen);
__ beq(t0, t1, ok);
// may have been set by another thread
__ membar(MacroAssembler::LoadLoad);
@ -2250,7 +2250,7 @@ void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
__ li(t0, c);
__ mv(t0, c);
__ sd(t0, Address(sp, offset_from_rsp_in_bytes));
}

@ -97,7 +97,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
sub(hdr, hdr, sp);
li(t0, aligned_mask - os::vm_page_size());
mv(t0, aligned_mask - os::vm_page_size());
andr(hdr, hdr, t0);
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)

@ -861,7 +861,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ check_klass_subtype_slow_path(x14, x10, x12, x15, NULL, &miss);
// fallthrough on success:
__ li(t0, 1);
__ mv(t0, 1);
__ sd(t0, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
__ pop_reg(RegSet::of(x10, x12, x14, x15), sp);
__ ret();
@ -1051,7 +1051,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
default:
{
StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);
__ li(x10, (int) id);
__ mv(x10, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), x10);
__ should_not_reach_here();
}

@ -845,7 +845,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
bind(L);
// A very short string
li(t0, minCharsInWord);
mv(t0, minCharsInWord);
ble(cnt2, t0, SHORT_STRING);
// Compare longwords
@ -856,7 +856,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
ld(tmp1, Address(str1));
beq(str1, str2, DONE);
ld(tmp2, Address(str2));
li(t0, STUB_THRESHOLD);
mv(t0, STUB_THRESHOLD);
bge(cnt2, t0, STUB);
sub(cnt2, cnt2, minCharsInWord);
beqz(cnt2, TAIL_CHECK);
@ -870,7 +870,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
} else if (isLU) { // LU case
lwu(tmp1, Address(str1));
ld(tmp2, Address(str2));
li(t0, STUB_THRESHOLD);
mv(t0, STUB_THRESHOLD);
bge(cnt2, t0, STUB);
addi(cnt2, cnt2, -4);
add(str1, str1, cnt2);
@ -884,7 +884,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
} else { // UL case
ld(tmp1, Address(str1));
lwu(tmp2, Address(str2));
li(t0, STUB_THRESHOLD);
mv(t0, STUB_THRESHOLD);
bge(cnt2, t0, STUB);
addi(cnt2, cnt2, -4);
slli(t0, cnt2, 1);
@ -1060,7 +1060,7 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
assert(elem_size == 1 || elem_size == 2, "must be char or byte");
assert_different_registers(a1, a2, result, cnt1, t0, t1, tmp3, tmp4, tmp5, tmp6);
li(elem_per_word, wordSize / elem_size);
mv(elem_per_word, wordSize / elem_size);
BLOCK_COMMENT("arrays_equals {");

@ -522,7 +522,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
if (is_cae) {
__ mv(result, expected);
} else {
__ addi(result, zr, 1);
__ mv(result, 1);
}
__ j(done);

@ -524,7 +524,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
bnez(t1, safepoint);
}
if (table == Interpreter::dispatch_table(state)) {
li(t1, Interpreter::distance_from_dispatch_table(state));
mv(t1, Interpreter::distance_from_dispatch_table(state));
add(t1, Rs, t1);
shadd(t1, t1, xdispatch, t1, 3);
} else {
@ -828,7 +828,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
// least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
sub(swap_reg, swap_reg, sp);
li(t0, (int64_t)(7 - os::vm_page_size()));
mv(t0, (int64_t)(7 - os::vm_page_size()));
andr(swap_reg, swap_reg, t0);
// Save the test result, for recursive case, the result is zero
@ -1666,7 +1666,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
ld(t0, mdo_addr);
beqz(t0, none);
li(tmp, (u1)TypeEntries::null_seen);
mv(tmp, (u1)TypeEntries::null_seen);
beq(t0, tmp, none);
// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
@ -1701,10 +1701,10 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
lbu(t0, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
if (is_virtual) {
li(tmp, (u1)DataLayout::virtual_call_type_data_tag);
mv(tmp, (u1)DataLayout::virtual_call_type_data_tag);
bne(t0, tmp, profile_continue);
} else {
li(tmp, (u1)DataLayout::call_type_data_tag);
mv(tmp, (u1)DataLayout::call_type_data_tag);
bne(t0, tmp, profile_continue);
}
@ -1734,7 +1734,7 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
mv(index, zr); // index < TypeProfileArgsLimit
bind(loop);
bgtz(index, profileReturnType);
li(t0, (int)MethodData::profile_return());
mv(t0, (int)MethodData::profile_return());
beqz(t0, profileArgument); // (index > 0 || MethodData::profile_return()) == false
bind(profileReturnType);
// If return value type is profiled we may have no argument to profile
@ -1742,7 +1742,7 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
mv(t1, - TypeStackSlotEntries::per_arg_count());
mul(t1, index, t1);
add(tmp, tmp, t1);
li(t1, TypeStackSlotEntries::per_arg_count());
mv(t1, TypeStackSlotEntries::per_arg_count());
add(t0, mdp, off_to_args);
blt(tmp, t1, done);
@ -1753,8 +1753,8 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
// stack offset o (zero based) from the start of the argument
// list, for n arguments translates into offset n - o - 1 from
// the end of the argument list
li(t0, stack_slot_offset0);
li(t1, slot_step);
mv(t0, stack_slot_offset0);
mv(t1, slot_step);
mul(t1, index, t1);
add(t0, t0, t1);
add(t0, mdp, t0);
@ -1764,8 +1764,8 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
Address arg_addr = argument_address(tmp);
ld(tmp, arg_addr);
li(t0, argument_type_offset0);
li(t1, type_step);
mv(t0, argument_type_offset0);
mv(t1, type_step);
mul(t1, index, t1);
add(t0, t0, t1);
add(mdo_addr, mdp, t0);
@ -1777,7 +1777,7 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
// increment index by 1
addi(index, index, 1);
li(t1, TypeProfileArgsLimit);
mv(t1, TypeProfileArgsLimit);
blt(index, t1, loop);
bind(loopEnd);
@ -1832,13 +1832,13 @@ void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret,
// length
Label do_profile;
lbu(t0, Address(xbcp, 0));
li(tmp, (u1)Bytecodes::_invokedynamic);
mv(tmp, (u1)Bytecodes::_invokedynamic);
beq(t0, tmp, do_profile);
li(tmp, (u1)Bytecodes::_invokehandle);
mv(tmp, (u1)Bytecodes::_invokehandle);
beq(t0, tmp, do_profile);
get_method(tmp);
lhu(t0, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
li(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
mv(t1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
bne(t0, t1, profile_continue);
bind(do_profile);
}

@ -738,7 +738,7 @@ void MacroAssembler::la(Register Rd, const Address &adr) {
switch (adr.getMode()) {
case Address::literal: {
if (rtype == relocInfo::none) {
li(Rd, (intptr_t)(adr.target()));
mv(Rd, (intptr_t)(adr.target()));
} else {
movptr(Rd, adr.target());
}
@ -1602,7 +1602,7 @@ void MacroAssembler::andi(Register Rd, Register Rn, int64_t imm, Register tmp) {
and_imm12(Rd, Rn, imm);
} else {
assert_different_registers(Rn, tmp);
li(tmp, imm);
mv(tmp, imm);
andr(Rd, Rn, tmp);
}
}
@ -1616,7 +1616,7 @@ void MacroAssembler::orptr(Address adr, RegisterOrConstant src, Register tmp1, R
ori(tmp1, tmp1, src.as_constant());
} else {
assert_different_registers(tmp1, tmp2);
li(tmp2, src.as_constant());
mv(tmp2, src.as_constant());
orr(tmp1, tmp1, tmp2);
}
}
@ -1858,7 +1858,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register
}
assert_different_registers(src, xbase);
li(xbase, (uintptr_t)CompressedKlassPointers::base());
mv(xbase, (uintptr_t)CompressedKlassPointers::base());
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
@ -1900,7 +1900,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register
}
assert_different_registers(src, xbase);
li(xbase, (intptr_t)CompressedKlassPointers::base());
mv(xbase, (intptr_t)CompressedKlassPointers::base());
sub(dst, src, xbase);
if (CompressedKlassPointers::shift() != 0) {
assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
@ -2053,7 +2053,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
if (itable_index.is_register()) {
slli(t0, itable_index.as_register(), 3);
} else {
li(t0, itable_index.as_constant() << 3);
mv(t0, itable_index.as_constant() << 3);
}
add(recv_klass, recv_klass, t0);
if (itentry_off) {
@ -2243,10 +2243,10 @@ void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expecte
andi(aligned_addr, addr, ~3);
if (size == int8) {
addi(mask, zr, 0xff);
mv(mask, 0xff);
} else {
// size == int16 case
addi(mask, zr, -1);
mv(mask, -1);
zero_extend(mask, mask, 16);
}
sll(mask, mask, shift);
@ -2286,7 +2286,7 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
bnez(tmp, retry);
if (result_as_bool) {
li(result, 1);
mv(result, 1);
j(done);
bind(fail);
@ -2333,7 +2333,7 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
bnez(tmp, fail);
// Success
li(result, 1);
mv(result, 1);
j(done);
// Fail
@ -2359,7 +2359,7 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
// equal, succeed
if (result_as_bool) {
li(result, 1);
mv(result, 1);
} else {
mv(result, expected);
}
@ -2388,7 +2388,7 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected,
bnez(t0, fail);
// Success
li(result, 1);
mv(result, 1);
j(done);
// Fail
@ -2636,7 +2636,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
add(x15, x15, Array<Klass*>::base_offset_in_bytes());
// Set t0 to an obvious invalid value, falling through by default
li(t0, -1);
mv(t0, -1);
// Scan X12 words at [X15] for an occurrence of X10.
repne_scan(x15, x10, x12, t0);
@ -2694,7 +2694,7 @@ void MacroAssembler::get_thread(Register thread) {
void MacroAssembler::load_byte_map_base(Register reg) {
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
li(reg, (uint64_t)byte_map_base);
mv(reg, (uint64_t)byte_map_base);
}
void MacroAssembler::la_patchable(Register reg1, const Address &dest, int32_t &offset) {
@ -3056,7 +3056,7 @@ void MacroAssembler::mul_add(Register out, Register in, Register offset,
add(in, in, t0);
const int unroll = 8;
li(tmp, unroll);
mv(tmp, unroll);
blt(len, tmp, L_tail_loop);
bind(L_unroll);
for (int i = 0; i < unroll; i++) {
@ -3546,7 +3546,7 @@ void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1
assert_different_registers(Rd, Rs, tmp1, tmp2);
Label Loop;
int step = isLL ? 8 : 16;
li(Rd, -step);
mv(Rd, -step);
mv(tmp2, Rs);
bind(Loop);
@ -3563,7 +3563,7 @@ void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1
void MacroAssembler::inflate_lo32(Register Rd, Register Rs, Register tmp1, Register tmp2)
{
assert_different_registers(Rd, Rs, tmp1, tmp2);
li(tmp1, 0xFF);
mv(tmp1, 0xFF);
mv(Rd, zr);
for (int i = 0; i <= 3; i++)
{
@ -3585,7 +3585,7 @@ void MacroAssembler::inflate_lo32(Register Rd, Register Rs, Register tmp1, Regis
void MacroAssembler::inflate_hi32(Register Rd, Register Rs, Register tmp1, Register tmp2)
{
assert_different_registers(Rd, Rs, tmp1, tmp2);
li(tmp1, 0xFF00000000);
mv(tmp1, 0xFF00000000);
mv(Rd, zr);
for (int i = 0; i <= 3; i++)
{
@ -3683,7 +3683,7 @@ void MacroAssembler::zero_words(Register base, u_int64_t cnt)
Register cnt_reg = t0;
Register loop_base = t1;
cnt = cnt - remainder;
li(cnt_reg, cnt);
mv(cnt_reg, cnt);
add(loop_base, base, remainder * wordSize);
bind(loop);
sub(cnt_reg, cnt_reg, unroll);

@ -2102,7 +2102,7 @@ encode %{
Assembler::CompressibleRegion cr(&_masm);
int64_t con = (int64_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
__ li(dst_reg, con);
__ mv(dst_reg, con);
%}
enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
@ -2119,7 +2119,7 @@ encode %{
__ mov_metadata(dst_reg, (Metadata*)con);
} else {
assert(rtype == relocInfo::none, "unexpected reloc type");
__ li(dst_reg, $src$$constant);
__ mv(dst_reg, $src$$constant);
}
}
%}
@ -2128,7 +2128,7 @@ encode %{
C2_MacroAssembler _masm(&cbuf);
Assembler::CompressibleRegion cr(&_masm);
Register dst_reg = as_Register($dst$$reg);
__ li(dst_reg, 1);
__ mv(dst_reg, 1);
%}
enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
@ -2255,7 +2255,7 @@ encode %{
__ bind(miss);
if (!$primary) {
__ li(cr_reg, 1);
__ mv(cr_reg, 1);
}
__ bind(done);
@ -2396,7 +2396,7 @@ encode %{
// Check if the owner is self by comparing the value in the
// markWord of object (disp_hdr) with the stack pointer.
__ sub(disp_hdr, disp_hdr, sp);
__ li(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
__ mv(tmp, (intptr_t) (~(os::vm_page_size()-1) | (uintptr_t)markWord::lock_mask_in_place));
// If (mark & lock_mask) == 0 and mark - sp < page_size, we are stack-locking and goto cont,
// hence we can store 0 as the displaced header in the box, which indicates that it is a
// recursive lock.

@ -437,7 +437,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
__ sd(t0, Address(sp, next_off), /*temp register*/esp);
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ li(t0, 0xdeadffffdeadaaaaul);
__ mv(t0, 0xdeadffffdeadaaaaul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
} else {
@ -456,7 +456,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// long/double in gpr
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ li(t0, 0xdeadffffdeadaaabul);
__ mv(t0, 0xdeadffffdeadaaabul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
__ sd(r, Address(sp, next_off));
@ -472,7 +472,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
} else {
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ li(t0, 0xdeadffffdeadaaacul);
__ mv(t0, 0xdeadffffdeadaaacul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp);
#endif /* ASSERT */
__ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
@ -1973,7 +1973,7 @@ void SharedRuntime::generate_deopt_blob() {
// Now it is safe to overwrite any register
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(xcpool, Deoptimization::Unpack_exception); // callee-saved
__ mv(xcpool, Deoptimization::Unpack_exception); // callee-saved
// load throwing pc from JavaThread and patch it as the return address
// of the current frame. Then clear the field in JavaThread
@ -2034,7 +2034,7 @@ void SharedRuntime::generate_deopt_blob() {
__ lwu(xcpool, Address(x15, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
Label noException;
__ li(t0, Deoptimization::Unpack_exception);
__ mv(t0, Deoptimization::Unpack_exception);
__ bne(xcpool, t0, noException); // Was exception pending?
__ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
__ ld(x13, Address(xthread, JavaThread::exception_pc_offset()));
@ -2113,7 +2113,7 @@ void SharedRuntime::generate_deopt_blob() {
__ sub(sp, sp, x9);
// Push interpreter frames in a loop
__ li(t0, 0xDEADDEAD); // Make a recognizable pattern
__ mv(t0, 0xDEADDEAD); // Make a recognizable pattern
__ mv(t1, t0);
Label loop;
__ bind(loop);

@ -355,13 +355,13 @@ class StubGenerator: public StubCodeGenerator {
__ ld(j_rarg2, result);
Label is_long, is_float, is_double, exit;
__ ld(j_rarg1, result_type);
__ li(t0, (u1)T_OBJECT);
__ mv(t0, (u1)T_OBJECT);
__ beq(j_rarg1, t0, is_long);
__ li(t0, (u1)T_LONG);
__ mv(t0, (u1)T_LONG);
__ beq(j_rarg1, t0, is_long);
__ li(t0, (u1)T_FLOAT);
__ mv(t0, (u1)T_FLOAT);
__ beq(j_rarg1, t0, is_float);
__ li(t0, (u1)T_DOUBLE);
__ mv(t0, (u1)T_DOUBLE);
__ beq(j_rarg1, t0, is_double);
// handle T_INT case
@ -753,7 +753,7 @@ class StubGenerator: public StubCodeGenerator {
{
Label L;
__ li(t0, 8);
__ mv(t0, 8);
__ bge(count, t0, L);
__ stop("genrate_copy_longs called with < 8 words");
__ bind(L);
@ -1964,7 +1964,7 @@ class StubGenerator: public StubCodeGenerator {
}
__ BIND(L_failed);
__ li(x10, -1);
__ mv(x10, -1);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret();
@ -2917,7 +2917,7 @@ class StubGenerator: public StubCodeGenerator {
__ beqz(numIter, exit);
__ shadd(newArr, newIdx, newArr, t0, 2);
__ li(shiftRevCount, 32);
__ mv(shiftRevCount, 32);
__ sub(shiftRevCount, shiftRevCount, shiftCount);
__ bind(loop);
@ -2971,7 +2971,7 @@ class StubGenerator: public StubCodeGenerator {
__ beqz(idx, exit);
__ shadd(newArr, newIdx, newArr, t0, 2);
__ li(shiftRevCount, 32);
__ mv(shiftRevCount, 32);
__ sub(shiftRevCount, shiftRevCount, shiftCount);
__ bind(loop);
@ -3272,7 +3272,7 @@ class StubGenerator: public StubCodeGenerator {
ld(Rm, Address(Rm));
add(Rn, Pn_base, Rn);
ld(Rn, Address(Rn));
li(t0, 1); // set carry flag, i.e. no borrow
mv(t0, 1); // set carry flag, i.e. no borrow
align(16);
bind(loop); {
notr(Rn, Rn);
@ -3438,7 +3438,7 @@ class StubGenerator: public StubCodeGenerator {
enter();
// Make room.
li(Ra, 512);
mv(Ra, 512);
bgt(Rlen, Ra, argh);
slli(Ra, Rlen, exact_log2(4 * sizeof(jint)));
sub(Ra, sp, Ra);
@ -3464,7 +3464,7 @@ class StubGenerator: public StubCodeGenerator {
{
ld(Rn, Address(Pn_base));
mul(Rlo_mn, Rn, inv);
li(t0, -1);
mv(t0, -1);
Label ok;
beq(Rlo_mn, t0, ok);
stop("broken inverse in Montgomery multiply");
@ -3560,7 +3560,7 @@ class StubGenerator: public StubCodeGenerator {
enter();
// Make room.
li(Ra, 512);
mv(Ra, 512);
bgt(Rlen, Ra, argh);
slli(Ra, Rlen, exact_log2(4 * sizeof(jint)));
sub(Ra, sp, Ra);

@ -526,7 +526,7 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
address entry = __ pc();
__ push(state);
__ call_VM(noreg, runtime_entry);
__ fence(0xf, 0xf);
__ membar(MacroAssembler::AnyAny);
__ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
return entry;
}
@ -1598,7 +1598,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Label L_done;
__ lbu(t0, Address(xbcp, 0));
__ li(t1, Bytecodes::_invokestatic);
__ mv(t1, Bytecodes::_invokestatic);
__ bne(t1, t0, L_done);
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
@ -1735,7 +1735,7 @@ void TemplateInterpreterGenerator::count_bytecode() {
__ push_reg(t0);
__ push_reg(x10);
__ mv(x10, (address) &BytecodeCounter::_counter_value);
__ li(t0, 1);
__ mv(t0, 1);
__ amoadd_d(zr, x10, t0, Assembler::aqrl);
__ pop_reg(x10);
__ pop_reg(t0);

@ -230,13 +230,13 @@ void TemplateTable::aconst_null()
void TemplateTable::iconst(int value)
{
transition(vtos, itos);
__ li(x10, value);
__ mv(x10, value);
}
void TemplateTable::lconst(int value)
{
transition(vtos, ltos);
__ li(x10, value);
__ mv(x10, value);
}
void TemplateTable::fconst(int value)
@ -3685,7 +3685,7 @@ void TemplateTable::instanceof() {
__ j(done);
// Come here on success
__ bind(ok_is_subtype);
__ li(x10, 1);
__ mv(x10, 1);
// Collect counts on whether this test sees NULLs a lot or not.
if (ProfileInterpreter) {