8343242: RISC-V: Refactor materialization of literal address

Reviewed-by: rehn, fjiang
This commit is contained in:
Fei Yang 2024-11-04 01:40:20 +00:00
parent d26412e657
commit 37a3398b58
14 changed files with 74 additions and 133 deletions

View File

@ -42,9 +42,7 @@
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) { void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry); __ bind(_entry);
InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset()); InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
__ relocate(safepoint_pc.rspec(), [&] { __ la(t0, safepoint_pc);
__ la(t0, safepoint_pc.target());
});
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset())); __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,

View File

@ -333,8 +333,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
const char *name = nullptr; const char *name = nullptr;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
CodeBlob *cb = CodeCache::find_blob(entry); if (CodeCache::contains(entry)) {
if (cb != nullptr) {
__ far_call(RuntimeAddress(entry)); __ far_call(RuntimeAddress(entry));
} else { } else {
const int args_num = 3; const int args_num = 3;

View File

@ -1402,9 +1402,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
} }
int pc_for_athrow_offset = __ offset(); int pc_for_athrow_offset = __ offset();
InternalAddress pc_for_athrow(__ pc()); InternalAddress pc_for_athrow(__ pc());
__ relocate(pc_for_athrow.rspec(), [&] { __ la(exceptionPC->as_register(), pc_for_athrow);
__ la(exceptionPC->as_register(), pc_for_athrow.target());
});
add_call_info(pc_for_athrow_offset, info); // for exception handler add_call_info(pc_for_athrow_offset, info); // for exception handler
__ verify_not_null_oop(x10); __ verify_not_null_oop(x10);

View File

@ -81,8 +81,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
range, \ range, \
constraint) \ constraint) \
\ \
product(bool, NearCpool, true, \
"constant pool is close to instructions") \
product(bool, UseBlockZeroing, false, \ product(bool, UseBlockZeroing, false, \
"Use Zicboz for block zeroing") \ "Use Zicboz for block zeroing") \
product(intx, BlockZeroingLowLimit, 256, \ product(intx, BlockZeroingLowLimit, 256, \

View File

@ -191,10 +191,7 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
} }
void InterpreterMacroAssembler::get_dispatch() { void InterpreterMacroAssembler::get_dispatch() {
ExternalAddress target((address)Interpreter::dispatch_table()); la(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()));
relocate(target.rspec(), [&] {
la(xdispatch, target.target());
});
} }
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,

View File

@ -166,7 +166,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
iterate(fingerprint); iterate(fingerprint);
// return result handler // return result handler
__ la(x10, ExternalAddress(Interpreter::result_handler(method()->result_type()))); __ movptr(x10, ExternalAddress(Interpreter::result_handler(method()->result_type())));
__ ret(); __ ret();
__ flush(); __ flush();

View File

@ -73,14 +73,10 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
MacroAssembler* masm = new MacroAssembler(&cbuf); MacroAssembler* masm = new MacroAssembler(&cbuf);
address fast_entry = __ pc(); address fast_entry = __ pc();
Address target(SafepointSynchronize::safepoint_counter_addr());
__ relocate(target.rspec(), [&] {
__ la(rcounter_addr, target.target());
});
Label slow; Label slow;
Address safepoint_counter_addr(rcounter_addr, 0); ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ lwu(rcounter, safepoint_counter_addr); __ la(rcounter_addr, counter);
__ lwu(rcounter, Address(rcounter_addr));
// An even value means there are no ongoing safepoint operations // An even value means there are no ongoing safepoint operations
__ test_bit(t0, rcounter, 0); __ test_bit(t0, rcounter, 0);
__ bnez(t0, slow); __ bnez(t0, slow);
@ -91,12 +87,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// Check to see if a field access watch has been set before we // Check to see if a field access watch has been set before we
// take the fast path. // take the fast path.
ExternalAddress target((address) JvmtiExport::get_field_access_count_addr()); __ lwu(result, ExternalAddress(JvmtiExport::get_field_access_count_addr()));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(result, target.target(), offset);
__ lwu(result, Address(result, offset));
});
__ bnez(result, slow); __ bnez(result, slow);
__ mv(robj, c_rarg1); __ mv(robj, c_rarg1);
@ -143,7 +134,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// (LoadStore for volatile field). // (LoadStore for volatile field).
__ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
__ lw(t0, safepoint_counter_addr); __ lw(t0, Address(rcounter_addr));
__ bne(rcounter, t0, slow); __ bne(rcounter, t0, slow);
switch (type) { switch (type) {

View File

@ -531,12 +531,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
} }
// Call indirectly to solve generation ordering problem // Call indirectly to solve generation ordering problem
RuntimeAddress target(StubRoutines::verify_oop_subroutine_entry_address()); ld(t1, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
relocate(target.rspec(), [&] {
int32_t offset;
la(t1, target.target(), offset);
ld(t1, Address(t1, offset));
});
jalr(t1); jalr(t1);
pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp);
@ -576,12 +571,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
} }
// Call indirectly to solve generation ordering problem // Call indirectly to solve generation ordering problem
RuntimeAddress target(StubRoutines::verify_oop_subroutine_entry_address()); ld(t1, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address()));
relocate(target.rspec(), [&] {
int32_t offset;
la(t1, target.target(), offset);
ld(t1, Address(t1, offset));
});
jalr(t1); jalr(t1);
pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp); pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp);
@ -839,17 +829,14 @@ void MacroAssembler::la(Register Rd, const address addr) {
} }
void MacroAssembler::la(Register Rd, const address addr, int32_t &offset) { void MacroAssembler::la(Register Rd, const address addr, int32_t &offset) {
if (is_32bit_offset_from_codecache((int64_t)addr)) { int64_t distance = addr - pc();
int64_t distance = addr - pc(); assert(is_valid_32bit_offset(distance), "Must be");
assert(is_valid_32bit_offset(distance), "Must be"); auipc(Rd, (int32_t)distance + 0x800);
auipc(Rd, (int32_t)distance + 0x800); offset = ((int32_t)distance << 20) >> 20;
offset = ((int32_t)distance << 20) >> 20;
} else {
assert(!CodeCache::contains(addr), "Must be");
movptr(Rd, addr, offset);
}
} }
// Materialize with auipc + addi sequence if adr is a literal
// address inside code cache. Emit a movptr sequence otherwise.
void MacroAssembler::la(Register Rd, const Address &adr) { void MacroAssembler::la(Register Rd, const Address &adr) {
switch (adr.getMode()) { switch (adr.getMode()) {
case Address::literal: { case Address::literal: {
@ -857,9 +844,15 @@ void MacroAssembler::la(Register Rd, const Address &adr) {
if (rtype == relocInfo::none) { if (rtype == relocInfo::none) {
mv(Rd, (intptr_t)(adr.target())); mv(Rd, (intptr_t)(adr.target()));
} else { } else {
relocate(adr.rspec(), [&] { if (CodeCache::contains(adr.target())) {
movptr(Rd, adr.target()); relocate(adr.rspec(), [&] {
}); la(Rd, adr.target());
});
} else {
relocate(adr.rspec(), [&] {
movptr(Rd, adr.target());
});
}
} }
break; break;
} }
@ -975,11 +968,15 @@ void MacroAssembler::j(const address dest, Register temp) {
void MacroAssembler::j(const Address &dest, Register temp) { void MacroAssembler::j(const Address &dest, Register temp) {
switch (dest.getMode()) { switch (dest.getMode()) {
case Address::literal: { case Address::literal: {
relocate(dest.rspec(), [&] { if (CodeCache::contains(dest.target())) {
int32_t offset; far_jump(dest, temp);
la(temp, dest.target(), offset); } else {
jr(temp, offset); relocate(dest.rspec(), [&] {
}); int32_t offset;
movptr(temp, dest.target(), offset);
jr(temp, offset);
});
}
break; break;
} }
case Address::base_plus_offset: { case Address::base_plus_offset: {
@ -1026,14 +1023,13 @@ void MacroAssembler::jalr(Register Rs, int32_t offset) {
void MacroAssembler::rt_call(address dest, Register tmp) { void MacroAssembler::rt_call(address dest, Register tmp) {
assert(tmp != x5, "tmp register must not be x5."); assert(tmp != x5, "tmp register must not be x5.");
CodeBlob *cb = CodeCache::find_blob(dest);
RuntimeAddress target(dest); RuntimeAddress target(dest);
if (cb) { if (CodeCache::contains(dest)) {
far_call(target, tmp); far_call(target, tmp);
} else { } else {
relocate(target.rspec(), [&] { relocate(target.rspec(), [&] {
int32_t offset; int32_t offset;
la(tmp, target.target(), offset); movptr(tmp, target.target(), offset);
jalr(tmp, offset); jalr(tmp, offset);
}); });
} }
@ -1974,16 +1970,18 @@ void MacroAssembler::reinit_heapbase() {
if (Universe::is_fully_initialized()) { if (Universe::is_fully_initialized()) {
mv(xheapbase, CompressedOops::base()); mv(xheapbase, CompressedOops::base());
} else { } else {
ExternalAddress target(CompressedOops::base_addr()); ld(xheapbase, ExternalAddress(CompressedOops::base_addr()));
relocate(target.rspec(), [&] {
int32_t offset;
la(xheapbase, target.target(), offset);
ld(xheapbase, Address(xheapbase, offset));
});
} }
} }
} }
// Materialize the literal address held in 'addr' into Rd.
// Emits the relocation info from addr.rspec() around a movptr instruction
// sequence, which (per the declaration's comment) can later be patched to
// any 48-bit constant. Supplying 'temp' may shorten the emitted sequence.
// Only valid for Address::literal mode — enforced by the assert below.
void MacroAssembler::movptr(Register Rd, const Address &addr, Register temp) {
assert(addr.getMode() == Address::literal, "must be applied to a literal address");
relocate(addr.rspec(), [&] {
movptr(Rd, addr.target(), temp);
});
}
void MacroAssembler::movptr(Register Rd, address addr, Register temp) { void MacroAssembler::movptr(Register Rd, address addr, Register temp) {
int offset = 0; int offset = 0;
movptr(Rd, addr, offset, temp); movptr(Rd, addr, offset, temp);
@ -2520,10 +2518,10 @@ void MacroAssembler::movoop(Register dst, jobject obj) {
RelocationHolder rspec = oop_Relocation::spec(oop_index); RelocationHolder rspec = oop_Relocation::spec(oop_index);
if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) {
la(dst, Address((address)obj, rspec)); movptr(dst, Address((address)obj, rspec));
} else { } else {
address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
ld_constant(dst, Address(dummy, rspec)); ld(dst, Address(dummy, rspec));
} }
} }
@ -2537,7 +2535,7 @@ void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
oop_index = oop_recorder()->find_index(obj); oop_index = oop_recorder()->find_index(obj);
} }
RelocationHolder rspec = metadata_Relocation::spec(oop_index); RelocationHolder rspec = metadata_Relocation::spec(oop_index);
la(dst, Address((address)obj, rspec)); movptr(dst, Address((address)obj, rspec));
} }
// Writes to stack successive pages until offset reached to check for // Writes to stack successive pages until offset reached to check for
@ -3622,7 +3620,7 @@ void MacroAssembler::atomic_cas(
} }
void MacroAssembler::far_jump(const Address &entry, Register tmp) { void MacroAssembler::far_jump(const Address &entry, Register tmp) {
assert(CodeCache::find_blob(entry.target()) != nullptr, assert(CodeCache::contains(entry.target()),
"destination of far jump not found in code cache"); "destination of far jump not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type || entry.rspec().type() == relocInfo::runtime_call_type
@ -3641,7 +3639,7 @@ void MacroAssembler::far_jump(const Address &entry, Register tmp) {
void MacroAssembler::far_call(const Address &entry, Register tmp) { void MacroAssembler::far_call(const Address &entry, Register tmp) {
assert(tmp != x5, "tmp register must not be x5."); assert(tmp != x5, "tmp register must not be x5.");
assert(CodeCache::find_blob(entry.target()) != nullptr, assert(CodeCache::contains(entry.target()),
"destination of far call not found in code cache"); "destination of far call not found in code cache");
assert(entry.rspec().type() == relocInfo::external_word_type assert(entry.rspec().type() == relocInfo::external_word_type
|| entry.rspec().type() == relocInfo::runtime_call_type || entry.rspec().type() == relocInfo::runtime_call_type
@ -4493,11 +4491,7 @@ void MacroAssembler::decrementw(const Address dst, int32_t value, Register tmp1,
void MacroAssembler::cmpptr(Register src1, const Address &src2, Label& equal, Register tmp) { void MacroAssembler::cmpptr(Register src1, const Address &src2, Label& equal, Register tmp) {
assert_different_registers(src1, tmp); assert_different_registers(src1, tmp);
assert(src2.getMode() == Address::literal, "must be applied to a literal address"); assert(src2.getMode() == Address::literal, "must be applied to a literal address");
relocate(src2.rspec(), [&] { ld(tmp, src2);
int32_t offset;
la(tmp, src2.target(), offset);
ld(tmp, Address(tmp, offset));
});
beq(src1, tmp, equal); beq(src1, tmp, equal);
} }

View File

@ -832,7 +832,6 @@ public:
compare_and_branch_insn insn, compare_and_branch_insn insn,
compare_and_branch_label_insn neg_insn, bool is_far = false); compare_and_branch_label_insn neg_insn, bool is_far = false);
// la will use movptr instead of GOT when not in reach for auipc.
void la(Register Rd, Label &label); void la(Register Rd, Label &label);
void la(Register Rd, const address addr); void la(Register Rd, const address addr);
void la(Register Rd, const address addr, int32_t &offset); void la(Register Rd, const address addr, int32_t &offset);
@ -866,8 +865,10 @@ public:
// patched to any 48-bit constant, i.e. address. // patched to any 48-bit constant, i.e. address.
// If common case supply additional temp register // If common case supply additional temp register
// to shorten the instruction sequence. // to shorten the instruction sequence.
void movptr(Register Rd, const Address &addr, Register tmp = noreg);
void movptr(Register Rd, address addr, Register tmp = noreg); void movptr(Register Rd, address addr, Register tmp = noreg);
void movptr(Register Rd, address addr, int32_t &offset, Register tmp = noreg); void movptr(Register Rd, address addr, int32_t &offset, Register tmp = noreg);
private: private:
void movptr1(Register Rd, uintptr_t addr, int32_t &offset); void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp); void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
@ -926,8 +927,9 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(Register Rd, address dest) { \ void NAME(Register Rd, address dest) { \
assert_cond(dest != nullptr); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ if (CodeCache::contains(dest)) { \
if (is_valid_32bit_offset(distance)) { \ int64_t distance = dest - pc(); \
assert(is_valid_32bit_offset(distance), "Must be"); \
auipc(Rd, (int32_t)distance + 0x800); \ auipc(Rd, (int32_t)distance + 0x800); \
Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \ Assembler::NAME(Rd, Rd, ((int32_t)distance << 20) >> 20); \
} else { \ } else { \
@ -983,8 +985,9 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(FloatRegister Rd, address dest, Register temp = t0) { \ void NAME(FloatRegister Rd, address dest, Register temp = t0) { \
assert_cond(dest != nullptr); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ if (CodeCache::contains(dest)) { \
if (is_valid_32bit_offset(distance)) { \ int64_t distance = dest - pc(); \
assert(is_valid_32bit_offset(distance), "Must be"); \
auipc(temp, (int32_t)distance + 0x800); \ auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \ Assembler::NAME(Rd, temp, ((int32_t)distance << 20) >> 20); \
} else { \ } else { \
@ -1044,8 +1047,9 @@ public:
void NAME(Register Rs, address dest, Register temp = t0) { \ void NAME(Register Rs, address dest, Register temp = t0) { \
assert_cond(dest != nullptr); \ assert_cond(dest != nullptr); \
assert_different_registers(Rs, temp); \ assert_different_registers(Rs, temp); \
int64_t distance = dest - pc(); \ if (CodeCache::contains(dest)) { \
if (is_valid_32bit_offset(distance)) { \ int64_t distance = dest - pc(); \
assert(is_valid_32bit_offset(distance), "Must be"); \
auipc(temp, (int32_t)distance + 0x800); \ auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \ Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
} else { \ } else { \
@ -1089,8 +1093,9 @@ public:
#define INSN(NAME) \ #define INSN(NAME) \
void NAME(FloatRegister Rs, address dest, Register temp = t0) { \ void NAME(FloatRegister Rs, address dest, Register temp = t0) { \
assert_cond(dest != nullptr); \ assert_cond(dest != nullptr); \
int64_t distance = dest - pc(); \ if (CodeCache::contains(dest)) { \
if (is_valid_32bit_offset(distance)) { \ int64_t distance = dest - pc(); \
assert(is_valid_32bit_offset(distance), "Must be"); \
auipc(temp, (int32_t)distance + 0x800); \ auipc(temp, (int32_t)distance + 0x800); \
Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \ Assembler::NAME(Rs, temp, ((int32_t)distance << 20) >> 20); \
} else { \ } else { \
@ -1613,19 +1618,6 @@ private:
void repne_scan(Register addr, Register value, Register count, Register tmp); void repne_scan(Register addr, Register value, Register count, Register tmp);
void ld_constant(Register dest, const Address &const_addr) {
if (NearCpool) {
ld(dest, const_addr);
} else {
InternalAddress target(const_addr.target());
relocate(target.rspec(), [&] {
int32_t offset;
la(dest, target.target(), offset);
ld(dest, Address(dest, offset));
});
}
}
int bitset_to_regs(unsigned int bitset, unsigned char* regs); int bitset_to_regs(unsigned int bitset, unsigned char* regs);
Address add_memory_helper(const Address dst, Register tmp); Address add_memory_helper(const Address dst, Register tmp);

View File

@ -1266,8 +1266,7 @@ int MachCallRuntimeNode::ret_addr_offset() {
// sd(t0, Address(sp, wordSize)) -> sd // sd(t0, Address(sp, wordSize)) -> sd
// movptr(t1, addr, offset, t0) -> lui + lui + slli + add // movptr(t1, addr, offset, t0) -> lui + lui + slli + add
// jalr(t1, offset) -> jalr // jalr(t1, offset) -> jalr
CodeBlob *cb = CodeCache::find_blob(_entry_point); if (CodeCache::contains(_entry_point)) {
if (cb != nullptr) {
if (UseTrampolines) { if (UseTrampolines) {
return 1 * NativeInstruction::instruction_size; return 1 * NativeInstruction::instruction_size;
} }
@ -2494,8 +2493,7 @@ encode %{
// will be in a reachable segment) otherwise we have to use a jalr // will be in a reachable segment) otherwise we have to use a jalr
// which loads the absolute address into a register. // which loads the absolute address into a register.
address entry = (address)$meth$$method; address entry = (address)$meth$$method;
CodeBlob *cb = CodeCache::find_blob(entry); if (CodeCache::contains(entry)) {
if (cb != nullptr) {
address call = __ reloc_call(Address(entry, relocInfo::runtime_call_type)); address call = __ reloc_call(Address(entry, relocInfo::runtime_call_type));
if (call == nullptr) { if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full"); ciEnv::current()->record_failure("CodeCache is full");

View File

@ -2465,13 +2465,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
// take the time to call into the VM. // take the time to call into the VM.
Label L1; Label L1;
assert_different_registers(cache, index, x10); assert_different_registers(cache, index, x10);
ExternalAddress target(JvmtiExport::get_field_access_count_addr()); __ lwu(x10, ExternalAddress(JvmtiExport::get_field_access_count_addr()));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
__ lwu(x10, Address(t0, offset));
});
__ beqz(x10, L1); __ beqz(x10, L1);
__ load_field_entry(c_rarg2, index); __ load_field_entry(c_rarg2, index);
@ -2676,12 +2670,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
// we take the time to call into the VM. // we take the time to call into the VM.
Label L1; Label L1;
assert_different_registers(cache, index, x10); assert_different_registers(cache, index, x10);
ExternalAddress target(JvmtiExport::get_field_modification_count_addr()); __ lwu(x10, ExternalAddress(JvmtiExport::get_field_modification_count_addr()));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
__ lwu(x10, Address(t0, offset));
});
__ beqz(x10, L1); __ beqz(x10, L1);
__ mv(c_rarg2, cache); __ mv(c_rarg2, cache);
@ -2969,13 +2958,9 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// Check to see if a field modification watch has been set before // Check to see if a field modification watch has been set before
// we take the time to call into the VM. // we take the time to call into the VM.
Label L2; Label L2;
ExternalAddress target(JvmtiExport::get_field_modification_count_addr()); __ lwu(c_rarg3, ExternalAddress(JvmtiExport::get_field_modification_count_addr()));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
__ lwu(c_rarg3, Address(t0, offset));
});
__ beqz(c_rarg3, L2); __ beqz(c_rarg3, L2);
__ pop_ptr(x9); // copy the object pointer from tos __ pop_ptr(x9); // copy the object pointer from tos
__ verify_oop(x9); __ verify_oop(x9);
__ push_ptr(x9); // put the object pointer back on tos __ push_ptr(x9); // put the object pointer back on tos
@ -3101,13 +3086,9 @@ void TemplateTable::fast_accessfield(TosState state) {
// Check to see if a field access watch has been set before we // Check to see if a field access watch has been set before we
// take the time to call into the VM. // take the time to call into the VM.
Label L1; Label L1;
ExternalAddress target(JvmtiExport::get_field_access_count_addr()); __ lwu(x12, ExternalAddress(JvmtiExport::get_field_access_count_addr()));
__ relocate(target.rspec(), [&] {
int32_t offset;
__ la(t0, target.target(), offset);
__ lwu(x12, Address(t0, offset));
});
__ beqz(x12, L1); __ beqz(x12, L1);
// access constant pool cache entry // access constant pool cache entry
__ load_field_entry(c_rarg2, t1); __ load_field_entry(c_rarg2, t1);
__ verify_oop(x10); __ verify_oop(x10);

View File

@ -61,9 +61,6 @@ public class RISCV64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFac
if (config.avoidUnalignedAccesses) { if (config.avoidUnalignedAccesses) {
flags.add(RISCV64.Flag.AvoidUnalignedAccesses); flags.add(RISCV64.Flag.AvoidUnalignedAccesses);
} }
if (config.nearCpool) {
flags.add(RISCV64.Flag.NearCpool);
}
if (config.traceTraps) { if (config.traceTraps) {
flags.add(RISCV64.Flag.TraceTraps); flags.add(RISCV64.Flag.TraceTraps);
} }

View File

@ -45,7 +45,6 @@ class RISCV64HotSpotVMConfig extends HotSpotVMConfigAccess {
*/ */
final boolean useConservativeFence = getFlag("UseConservativeFence", Boolean.class); final boolean useConservativeFence = getFlag("UseConservativeFence", Boolean.class);
final boolean avoidUnalignedAccesses = getFlag("AvoidUnalignedAccesses", Boolean.class); final boolean avoidUnalignedAccesses = getFlag("AvoidUnalignedAccesses", Boolean.class);
final boolean nearCpool = getFlag("NearCpool", Boolean.class);
final boolean traceTraps = getFlag("TraceTraps", Boolean.class); final boolean traceTraps = getFlag("TraceTraps", Boolean.class);
final boolean useRVV = getFlag("UseRVV", Boolean.class); final boolean useRVV = getFlag("UseRVV", Boolean.class);
final boolean useRVC = getFlag("UseRVC", Boolean.class); final boolean useRVC = getFlag("UseRVC", Boolean.class);

View File

@ -166,7 +166,6 @@ public class RISCV64 extends Architecture {
public enum Flag { public enum Flag {
UseConservativeFence, UseConservativeFence,
AvoidUnalignedAccesses, AvoidUnalignedAccesses,
NearCpool,
TraceTraps, TraceTraps,
UseRVV, UseRVV,
UseRVC, UseRVC,