8241503: C2: Share MacroAssembler between mach nodes during code emission

Reviewed-by: kvn, mdoerr, amitkumar, lucy
This commit is contained in:
Cesar Soares Lucas 2024-04-11 15:44:49 +00:00 committed by Martin Doerr
parent 0656f08092
commit 31ee5108e0
60 changed files with 1713 additions and 2026 deletions

View File

@ -1148,8 +1148,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_codestub_branch_size();
@ -1602,7 +1602,7 @@ bool needs_acquiring_load_exclusive(const Node *n)
return true;
}
#define __ _masm.
#define __ masm->
// advance declarations for helper functions to convert register
// indices to register objects
@ -1657,8 +1657,7 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ brk(0);
}
@ -1674,8 +1673,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
C2_MacroAssembler _masm(&cbuf);
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
for (int i = 0; i < _count; i++) {
__ nop();
}
@ -1697,7 +1695,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@ -1751,9 +1749,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and rfp
const int framesize = C->output()->frame_size_in_bytes();
@ -1802,7 +1799,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
}
}
@ -1810,7 +1807,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Unimplemented();
}
C->output()->set_frame_complete(cbuf.insts_size());
C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@ -1864,9 +1861,8 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_slots() << LogBytesPerInt;
__ remove_frame(framesize);
@ -1938,7 +1934,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
@ -1971,8 +1967,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
uint ireg = ideal_reg();
if (ireg == Op_VecA && cbuf) {
C2_MacroAssembler _masm(cbuf);
if (ireg == Op_VecA && masm) {
int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
@ -1991,9 +1986,8 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
} else {
ShouldNotReachHere();
}
} else if (cbuf) {
} else if (masm) {
assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
C2_MacroAssembler _masm(cbuf);
assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
@ -2020,8 +2014,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
ShouldNotReachHere();
}
}
} else if (cbuf) {
C2_MacroAssembler _masm(cbuf);
} else if (masm) {
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
@ -2029,7 +2022,6 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
__ mov(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
} else {
C2_MacroAssembler _masm(cbuf);
__ movw(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
}
@ -2157,8 +2149,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, nullptr);
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -2176,9 +2168,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
@ -2217,10 +2207,8 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
__ ic_check(InteriorEntryAlignment);
}
@ -2234,13 +2222,12 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// mov rscratch1 #exception_blob_entry_point
// br rscratch1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2254,11 +2241,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2677,7 +2663,6 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack,
}
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN) \
C2_MacroAssembler _masm(&cbuf); \
{ \
guarantee(INDEX == -1, "mode not permitted for volatile"); \
guarantee(DISP == 0, "mode not permitted for volatile"); \
@ -2722,7 +2707,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
// Used for all non-volatile memory accesses. The use of
// $mem->opcode() to discover whether this pattern uses sign-extended
// offsets is something of a kludge.
static void loadStore(C2_MacroAssembler masm, mem_insn insn,
static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
Register reg, int opcode,
Register base, int index, int scale, int disp,
int size_in_memory)
@ -2732,12 +2717,12 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
/* Fix up any out-of-range offsets. */
assert_different_registers(rscratch1, base);
assert_different_registers(rscratch1, reg);
addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
addr = __ legitimize_address(addr, size_in_memory, rscratch1);
}
(masm.*insn)(reg, addr);
(masm->*insn)(reg, addr);
}
static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
FloatRegister reg, int opcode,
Register base, int index, int size, int disp,
int size_in_memory)
@ -2760,23 +2745,23 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
/* Fix up any out-of-range offsets. */
assert_different_registers(rscratch1, base);
Address addr = Address(base, disp);
addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
(masm.*insn)(reg, addr);
addr = __ legitimize_address(addr, size_in_memory, rscratch1);
(masm->*insn)(reg, addr);
} else {
assert(disp == 0, "unsupported address mode: disp = %d", disp);
(masm.*insn)(reg, Address(base, as_Register(index), scale));
(masm->*insn)(reg, Address(base, as_Register(index), scale));
}
}
static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
int opcode, Register base, int index, int size, int disp)
{
if (index == -1) {
(masm.*insn)(reg, T, Address(base, disp));
(masm->*insn)(reg, T, Address(base, disp));
} else {
assert(disp == 0, "unsupported address mode");
(masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
(masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
}
}
@ -2821,7 +2806,6 @@ encode %{
// catch all for unimplemented encodings
enc_class enc_unimplemented %{
C2_MacroAssembler _masm(&cbuf);
__ unimplemented("C2 catch all");
%}
@ -2831,7 +2815,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -2839,7 +2823,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -2847,7 +2831,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -2855,7 +2839,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -2863,7 +2847,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@ -2871,7 +2855,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@ -2879,7 +2863,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@ -2887,7 +2871,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@ -2895,7 +2879,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -2903,7 +2887,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -2911,7 +2895,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -2919,7 +2903,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@ -2927,7 +2911,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -2935,7 +2919,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@ -2943,15 +2927,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0(memory1 mem) %{
C2_MacroAssembler _masm(&cbuf);
loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -2959,15 +2942,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh0(memory2 mem) %{
C2_MacroAssembler _masm(&cbuf);
loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@ -2975,15 +2957,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw0(memory4 mem) %{
C2_MacroAssembler _masm(&cbuf);
loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -2994,20 +2975,18 @@ encode %{
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
}
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_str0(memory8 mem) %{
C2_MacroAssembler _masm(&cbuf);
loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@ -3015,7 +2994,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@ -3023,16 +3002,15 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
C2_MacroAssembler _masm(&cbuf);
__ membar(Assembler::StoreStore);
loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@ -3041,49 +3019,49 @@ encode %{
// Vector loads and stores
enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvH(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvS(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvD(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -3199,7 +3177,6 @@ encode %{
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
@ -3215,7 +3192,6 @@ encode %{
enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
{
C2_MacroAssembler _masm(&cbuf);
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovs(rscratch2, src_reg);
}
@ -3225,7 +3201,6 @@ encode %{
enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
{
C2_MacroAssembler _masm(&cbuf);
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovd(rscratch2, src_reg);
}
@ -3236,7 +3211,6 @@ encode %{
// synchronized read/update encodings
enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
@ -3265,7 +3239,6 @@ encode %{
%}
enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
C2_MacroAssembler _masm(&cbuf);
Register src_reg = as_Register($src$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
@ -3295,7 +3268,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
@ -3303,7 +3275,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
@ -3311,7 +3282,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
@ -3319,7 +3289,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
@ -3332,7 +3301,6 @@ encode %{
// CompareAndSwap sequence to serve as a barrier on acquiring a
// lock.
enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
@ -3340,7 +3308,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
@ -3348,7 +3315,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
@ -3356,7 +3322,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
@ -3365,7 +3330,6 @@ encode %{
// auxiliary used for CompareAndSwapX to set result register
enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
C2_MacroAssembler _masm(&cbuf);
Register res_reg = as_Register($res$$reg);
__ cset(res_reg, Assembler::EQ);
%}
@ -3373,7 +3337,6 @@ encode %{
// prefetch encodings
enc_class aarch64_enc_prefetchw(memory mem) %{
C2_MacroAssembler _masm(&cbuf);
Register base = as_Register($mem$$base);
int index = $mem$$index;
int scale = $mem$$scale;
@ -3394,7 +3357,6 @@ encode %{
/// mov envcodings
enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
C2_MacroAssembler _masm(&cbuf);
uint32_t con = (uint32_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
if (con == 0) {
@ -3405,7 +3367,6 @@ encode %{
%}
enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
uint64_t con = (uint64_t)$src$$constant;
if (con == 0) {
@ -3416,7 +3377,6 @@ encode %{
%}
enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr || con == (address)1) {
@ -3442,24 +3402,20 @@ encode %{
%}
enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, (uint64_t)1);
%}
enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
C2_MacroAssembler _masm(&cbuf);
__ load_byte_map_base($dst$$Register);
%}
enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@ -3472,13 +3428,11 @@ encode %{
%}
enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@ -3493,7 +3447,6 @@ encode %{
// arithmetic encodings
enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
@ -3507,7 +3460,6 @@ encode %{
%}
enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
@ -3521,7 +3473,6 @@ encode %{
%}
enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -3529,7 +3480,6 @@ encode %{
%}
enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -3537,7 +3487,6 @@ encode %{
%}
enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -3545,7 +3494,6 @@ encode %{
%}
enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -3555,14 +3503,12 @@ encode %{
// compare instruction encodings
enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src1$$reg);
int32_t val = $src2$$constant;
if (val >= 0) {
@ -3573,7 +3519,6 @@ encode %{
%}
enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
uint32_t val = (uint32_t)$src2$$constant;
__ movw(rscratch1, val);
@ -3581,14 +3526,12 @@ encode %{
%}
enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src1$$reg);
int64_t val = $src2$$constant;
if (val >= 0) {
@ -3603,7 +3546,6 @@ encode %{
%}
enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
uint64_t val = (uint64_t)$src2$$constant;
__ mov(rscratch1, val);
@ -3611,45 +3553,38 @@ encode %{
%}
enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_testp(iRegP src) %{
C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src$$reg);
__ cmp(reg, zr);
%}
enc_class aarch64_enc_testn(iRegN src) %{
C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src$$reg);
__ cmpw(reg, zr);
%}
enc_class aarch64_enc_b(label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ b(*L);
%}
enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
@ -3662,7 +3597,6 @@ encode %{
Register result_reg = as_Register($result$$reg);
Label miss;
C2_MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
nullptr, &miss,
/*set_cond_codes:*/ true);
@ -3673,8 +3607,6 @@ encode %{
%}
enc_class aarch64_enc_java_static_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
address addr = (address)$meth$$method;
address call;
if (!_method) {
@ -3690,7 +3622,7 @@ encode %{
__ nop();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
@ -3701,10 +3633,10 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
__ code()->shared_stub_to_interp_for(_method, call - __ begin());
} else {
// Emit stub for static call
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -3721,8 +3653,7 @@ encode %{
%}
enc_class aarch64_enc_java_dynamic_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -3735,7 +3666,6 @@ encode %{
%}
enc_class aarch64_enc_call_epilog() %{
C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
__ call_Unimplemented();
@ -3743,8 +3673,6 @@ encode %{
%}
enc_class aarch64_enc_java_to_runtime(method meth) %{
C2_MacroAssembler _masm(&cbuf);
// some calls to generated routines (arraycopy code) are scheduled
// by C2 as runtime calls. if so we can call them using a br (they
// will be in a reachable segment) otherwise we have to use a blr
@ -3775,12 +3703,10 @@ encode %{
%}
enc_class aarch64_enc_rethrow() %{
C2_MacroAssembler _masm(&cbuf);
__ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
%}
enc_class aarch64_enc_ret() %{
C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
if (Compile::current()->max_vector_size() > 0) {
__ verify_ptrue();
@ -3790,13 +3716,11 @@ encode %{
%}
enc_class aarch64_enc_tail_call(iRegP jump_target) %{
C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
__ br(target_reg);
%}
enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
// exception oop should be in r0
// ret addr has been popped into lr
@ -4565,7 +4489,7 @@ operand immP()
interface(CONST_INTER);
%}
// Null Pointer Immediate
// nullptr Pointer Immediate
operand immP0()
%{
predicate(n->get_ptr() == 0);
@ -4673,7 +4597,7 @@ operand immN()
interface(CONST_INTER);
%}
// Narrow Null Pointer Immediate
// Narrow nullptr Pointer Immediate
operand immN0()
%{
predicate(n->get_narrowcon() == 0);
@ -6768,7 +6692,7 @@ instruct loadConP0(iRegPNoSp dst, immP0 con)
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# null pointer" %}
format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p0(dst, con));
@ -6782,7 +6706,7 @@ instruct loadConP1(iRegPNoSp dst, immP_1 con)
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# null pointer" %}
format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p1(dst, con));
@ -6824,7 +6748,7 @@ instruct loadConN0(iRegNNoSp dst, immN0 con)
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# compressed null pointer" %}
format %{ "mov $dst, $con\t# compressed nullptr ptr" %}
ins_encode(aarch64_enc_mov_n0(dst, con));
@ -7735,7 +7659,7 @@ instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
@ -7776,7 +7700,7 @@ instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
@ -16870,7 +16794,7 @@ instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@ -16907,7 +16831,7 @@ instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@ -16944,7 +16868,7 @@ instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@ -16982,7 +16906,7 @@ instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask,
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);

View File

@ -94,7 +94,7 @@ source %{
PRegister Pg, const Address &adr);
// Predicated load/store, with optional ptrue to all elements of given predicate register.
static void loadStoreA_predicated(C2_MacroAssembler masm, bool is_store, FloatRegister reg,
static void loadStoreA_predicated(C2_MacroAssembler* masm, bool is_store, FloatRegister reg,
PRegister pg, BasicType mem_elem_bt, BasicType vector_elem_bt,
int opcode, Register base, int index, int size, int disp) {
sve_mem_insn_predicate insn;
@ -119,7 +119,7 @@ source %{
ShouldNotReachHere();
}
int imm4 = disp / mesize / Matcher::scalable_vector_reg_size(vector_elem_bt);
(masm.*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
(masm->*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
} else {
assert(false, "unimplemented");
ShouldNotReachHere();
@ -422,7 +422,7 @@ instruct loadV(vReg dst, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false,
loadStoreA_predicated(masm, /* is_store */ false,
$dst$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -439,7 +439,7 @@ instruct storeV(vReg src, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true,
loadStoreA_predicated(masm, /* is_store */ true,
$src$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -454,7 +454,7 @@ instruct loadV_masked(vReg dst, vmemA mem, pRegGov pg) %{
format %{ "loadV_masked $dst, $pg, $mem" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false, $dst$$FloatRegister,
loadStoreA_predicated(masm, /* is_store */ false, $dst$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -467,7 +467,7 @@ instruct storeV_masked(vReg src, vmemA mem, pRegGov pg) %{
format %{ "storeV_masked $mem, $pg, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true, $src$$FloatRegister,
loadStoreA_predicated(masm, /* is_store */ true, $src$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -4929,7 +4929,7 @@ instruct vloadmask_loadV(pReg dst, indirect mem, vReg tmp, rFlagsReg cr) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -4950,7 +4950,7 @@ instruct vloadmask_loadV_masked(pReg dst, indirect mem, pRegGov pg,
// Load valid mask values which are boolean type, and extend them to the
// defined vector element type. Convert the vector to predicate.
BasicType bt = Matcher::vector_element_basic_type(this);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -4977,7 +4977,7 @@ instruct vloadmask_loadVMasked(pReg dst, vmemA mem, pRegGov pg, vReg tmp, rFlags
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -5005,7 +5005,7 @@ instruct vloadmask_loadVMasked_masked(pReg dst, vmemA mem, pRegGov pg1, pRegGov
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg2$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -5030,7 +5030,7 @@ instruct storeV_vstoremask(indirect mem, pReg src, immI_gt_1 esize, vReg tmp) %{
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -5052,7 +5052,7 @@ instruct storeV_vstoremask_masked(indirect mem, pReg src, immI_gt_1 esize,
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -5078,7 +5078,7 @@ instruct storeVMasked_vstoremask(vmemA mem, pReg src, pRegGov pg, immI_gt_1 esiz
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type.");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -5105,7 +5105,7 @@ instruct storeVMasked_vstoremask_masked(vmemA mem, pReg src, pRegGov pg, immI_gt
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}

View File

@ -84,7 +84,7 @@ source %{
PRegister Pg, const Address &adr);
// Predicated load/store, with optional ptrue to all elements of given predicate register.
static void loadStoreA_predicated(C2_MacroAssembler masm, bool is_store, FloatRegister reg,
static void loadStoreA_predicated(C2_MacroAssembler* masm, bool is_store, FloatRegister reg,
PRegister pg, BasicType mem_elem_bt, BasicType vector_elem_bt,
int opcode, Register base, int index, int size, int disp) {
sve_mem_insn_predicate insn;
@ -109,7 +109,7 @@ source %{
ShouldNotReachHere();
}
int imm4 = disp / mesize / Matcher::scalable_vector_reg_size(vector_elem_bt);
(masm.*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
(masm->*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
} else {
assert(false, "unimplemented");
ShouldNotReachHere();
@ -361,7 +361,7 @@ instruct loadV(vReg dst, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false,
loadStoreA_predicated(masm, /* is_store */ false,
$dst$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -378,7 +378,7 @@ instruct storeV(vReg src, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true,
loadStoreA_predicated(masm, /* is_store */ true,
$src$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -393,7 +393,7 @@ instruct loadV_masked(vReg dst, vmemA mem, pRegGov pg) %{
format %{ "loadV_masked $dst, $pg, $mem" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false, $dst$$FloatRegister,
loadStoreA_predicated(masm, /* is_store */ false, $dst$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -406,7 +406,7 @@ instruct storeV_masked(vReg src, vmemA mem, pRegGov pg) %{
format %{ "storeV_masked $mem, $pg, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true, $src$$FloatRegister,
loadStoreA_predicated(masm, /* is_store */ true, $src$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -3321,7 +3321,7 @@ instruct vloadmask_loadV(pReg dst, indirect mem, vReg tmp, rFlagsReg cr) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -3342,7 +3342,7 @@ instruct vloadmask_loadV_masked(pReg dst, indirect mem, pRegGov pg,
// Load valid mask values which are boolean type, and extend them to the
// defined vector element type. Convert the vector to predicate.
BasicType bt = Matcher::vector_element_basic_type(this);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -3369,7 +3369,7 @@ instruct vloadmask_loadVMasked(pReg dst, vmemA mem, pRegGov pg, vReg tmp, rFlags
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -3397,7 +3397,7 @@ instruct vloadmask_loadVMasked_masked(pReg dst, vmemA mem, pRegGov pg1, pRegGov
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg2$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@ -3422,7 +3422,7 @@ instruct storeV_vstoremask(indirect mem, pReg src, immI_gt_1 esize, vReg tmp) %{
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -3444,7 +3444,7 @@ instruct storeV_vstoremask_masked(indirect mem, pReg src, immI_gt_1 esize,
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -3470,7 +3470,7 @@ instruct storeVMasked_vstoremask(vmemA mem, pReg src, pRegGov pg, immI_gt_1 esiz
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type.");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@ -3497,7 +3497,7 @@ instruct storeVMasked_vstoremask_masked(vmemA mem, pReg src, pRegGov pg, immI_gt
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}

View File

@ -29,7 +29,7 @@ define(choose, `loadStore($1, &MacroAssembler::$3, $2, $4,
%}')dnl
define(access, `
$3Register $1_reg = as_$3Register($$1$$reg);
$4choose(C2_MacroAssembler(&cbuf), $1_reg,$2,$mem->opcode(),
$4choose(masm, $1_reg,$2,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$5)')dnl
define(load,`
// This encoding class is generated automatically from ad_encode.m4.
@ -59,8 +59,7 @@ define(STORE0,`
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_$2`'0(memory$4 mem) %{
C2_MacroAssembler _masm(&cbuf);
choose(_masm,zr,$2,$mem->opcode(),
choose(masm,zr,$2,$mem->opcode(),
as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl
STORE(iRegI,strb,,,1)
STORE0(iRegI,strb,,1)
@ -72,7 +71,6 @@ STORE(iRegL,str,,
`// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
@ -85,8 +83,7 @@ STORE(vRegD,strd,Float,,8)
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
C2_MacroAssembler _masm(&cbuf);
__ membar(Assembler::StoreStore);
loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}

View File

@ -34,10 +34,10 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
precond(__ code()->stubs()->start() != badAddress);
precond(__ code()->stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
@ -45,13 +45,9 @@ address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// jmp -4 # to self
if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
mark = __ inst_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset();
if (base == nullptr) {

View File

@ -29,20 +29,18 @@ source_hpp %{
encode %{
enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
%}
@ -77,7 +75,7 @@ instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -115,7 +113,7 @@ instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -131,7 +129,7 @@ instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -147,7 +145,7 @@ instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -164,7 +162,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -181,7 +179,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -199,7 +197,7 @@ instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -216,7 +214,7 @@ instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -235,7 +233,7 @@ instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -254,7 +252,7 @@ instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@ source_hpp %{
source %{
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
@ -42,7 +42,7 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
@ -64,7 +64,7 @@ instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ ldr($dst$$Register, ref_addr);
x_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
x_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
@ -83,7 +83,7 @@ instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
ins_encode %{
__ ldar($dst$$Register, $mem$$Register);
x_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
x_load_barrier(masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@ -110,7 +110,7 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
@ -142,7 +142,7 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
@ -171,7 +171,7 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
@ -199,7 +199,7 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
@ -220,7 +220,7 @@ instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@ -237,7 +237,7 @@ instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr)
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}

View File

@ -33,40 +33,40 @@ source %{
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src) {
static void z_color(MacroAssembler* masm, const MachNode* node, Register dst, Register src) {
assert_different_registers(src, dst);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov);
__ movzw(dst, barrier_Relocation::unpatched);
__ orr(dst, dst, src, Assembler::LSL, ZPointerLoadShift);
}
static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ lsr(ref, ref, ZPointerLoadShift);
}
static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
static void z_keep_alive_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
__ movzw(tmp, barrier_Relocation::unpatched);
__ tst(ref, tmp);
ZLoadBarrierStubC2Aarch64* const stub = ZLoadBarrierStubC2Aarch64::create(node, ref_addr, ref);
__ br(Assembler::NE, *stub->entry());
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (on_non_strong) {
z_keep_alive_load_barrier(_masm, node, ref_addr, ref, tmp);
z_keep_alive_load_barrier(masm, node, ref_addr, ref, tmp);
return;
}
if (node->barrier_data() == ZBarrierElided) {
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
return;
}
@ -81,19 +81,19 @@ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ b(*stub->entry());
__ bind(good);
}
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
z_color(_masm, node, rnew_zpointer, rnew_zaddress);
z_color(masm, node, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
@ -113,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ ldr($dst$$Register, ref_addr);
z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
%}
ins_pipe(iload_reg_mem);
@ -133,7 +133,7 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
ins_encode %{
const Address ref_addr = Address($mem$$Register);
__ ldar($dst$$Register, $mem$$Register);
z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
%}
ins_pipe(pipe_serial);
@ -150,7 +150,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
__ str($tmp$$Register, ref_addr);
%}
ins_pipe(pipe_serial);
@ -167,7 +167,7 @@ instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr = Address($mem$$Register);
z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
__ stlr($tmp$$Register, $mem$$Register);
%}
ins_pipe(pipe_serial);
@ -187,8 +187,8 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, noreg);
__ cset($res$$Register, Assembler::EQ);
@ -211,8 +211,8 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, noreg);
__ cset($res$$Register, Assembler::EQ);
@ -235,11 +235,11 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
z_uncolor(_masm, this, $res$$Register);
z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -258,11 +258,11 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
z_uncolor(_masm, this, $res$$Register);
z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -278,9 +278,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
__ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
z_uncolor(_masm, this, $prev$$Register);
z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
@ -296,9 +296,9 @@ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr)
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
__ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
z_uncolor(_masm, this, $prev$$Register);
z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);

View File

@ -1108,8 +1108,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1173,8 +1172,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ br(r1); // the exception handler
}
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}

View File

@ -105,8 +105,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return ( 3 * 4 );
@ -138,7 +138,7 @@ bool assert_not_var_shift(const Node *n) {
return true;
}
#define __ _masm.
#define __ masm->
static FloatRegister reg_to_FloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);
@ -159,8 +159,7 @@ int MachNode::compute_padding(int current_offset) const {
// REQUIRED FUNCTIONALITY
// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_break(C2_MacroAssembler *masm) {
__ breakpoint();
}
@ -170,8 +169,8 @@ void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_break(cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
emit_break(masm);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
@ -179,16 +178,14 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
void emit_nop(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_nop(C2_MacroAssembler *masm) {
__ nop();
}
void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
void emit_call_reloc(C2_MacroAssembler *masm, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
int ret_addr_offset0 = n->as_MachCall()->ret_addr_offset();
int call_site_offset = cbuf.insts()->mark_off();
C2_MacroAssembler _masm(&cbuf);
int call_site_offset = __ code()->insts()->mark_off();
__ set_inst_mark(); // needed in emit_to_interp_stub() to locate the call
address target = (address)m->method();
assert(n->as_MachCall()->entry_point() == target, "sanity");
@ -210,8 +207,8 @@ void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, Reloc
//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }
void emit_lo(C2_MacroAssembler *masm, int val) { }
void emit_hi(C2_MacroAssembler *masm, int val) { }
//=============================================================================
@ -232,10 +229,9 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
Compile* C = ra_->C;
ConstantTable& constant_table = C->output()->constant_table();
C2_MacroAssembler _masm(&cbuf);
Register r = as_Register(ra_->get_encode(this));
CodeSection* consts_section = __ code()->consts();
@ -303,9 +299,8 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
for (int i = 0; i < OptoPrologueNops; i++) {
__ nop();
@ -334,11 +329,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm);
bs->nmethod_entry_barrier(masm);
}
// offset from scratch buffer is not valid
if (strcmp(cbuf.name(), "Compile::Fill_buffer") == 0) {
if (strcmp(__ code()->name(), "Compile::Fill_buffer") == 0) {
C->output()->set_frame_complete( __ offset() );
}
@ -379,8 +374,7 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
size_t framesize = C->output()->frame_size_in_bytes();
@ -430,7 +424,7 @@ static inline bool is_iRegLd_memhd(OptoReg::Name src_first, OptoReg::Name src_se
return (rlo&1)==0 && (rlo+1 == rhi) && is_memoryHD(offset);
}
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
uint MachSpillCopyNode::implementation( C2_MacroAssembler *masm,
PhaseRegAlloc *ra_,
bool do_size,
outputStream* st ) const {
@ -463,14 +457,12 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Bailout only for real instruction emit.
// This requires a single comment change in shared code. ( see output.cpp "Normal" instruction case )
C2_MacroAssembler _masm(cbuf);
// --------------------------------------
// Check for mem-mem move. Load into unused float registers and fall into
// the float-store case.
if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
if (cbuf && !is_memoryfp(offset)) {
if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
@ -480,7 +472,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
src_second = OptoReg::Name(R_mem_copy_hi_num);
src_first_rc = rc_float;
src_second_rc = rc_float;
if (cbuf) {
if (masm) {
__ ldr_double(Rmemcopy, Address(SP, offset));
} else if (!do_size) {
st->print(LDR_DOUBLE " R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
@ -488,7 +480,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
} else {
src_first = OptoReg::Name(R_mem_copy_lo_num);
src_first_rc = rc_float;
if (cbuf) {
if (masm) {
__ ldr_float(Rmemcopy, Address(SP, offset));
} else if (!do_size) {
st->print(LDR_FLOAT " R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
@ -507,7 +499,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_first_rc == rc_int && dst_first_rc == rc_int) {
// Else normal reg-reg copy
assert( src_second != dst_first, "smashed second before evacuating it" );
if (cbuf) {
if (masm) {
__ mov(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -522,13 +514,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for integer store
if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(dst_first);
if (cbuf && !is_memoryI(offset)) {
if (masm && !is_memoryI(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (src_second_rc != rc_bad && is_iRegLd_memhd(src_first, src_second, offset)) {
assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
if (cbuf) {
if (masm) {
__ str_64(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -538,7 +530,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ str_32(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -554,13 +546,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for integer load
if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
if (cbuf && !is_memoryI(offset)) {
if (masm && !is_memoryI(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (src_second_rc != rc_bad && is_iRegLd_memhd(dst_first, dst_second, offset)) {
assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
if (cbuf) {
if (masm) {
__ ldr_64(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -570,7 +562,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ ldr_32(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -587,7 +579,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_first_rc == rc_float && dst_first_rc == rc_float) {
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
if (cbuf) {
if (masm) {
__ mov_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -598,7 +590,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return 4;
}
if (cbuf) {
if (masm) {
__ mov_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -613,14 +605,14 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for float store
if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(dst_first);
if (cbuf && !is_memoryfp(offset)) {
if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
// Further check for aligned-adjacent pair, so we can use a double store
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
if (cbuf) {
if (masm) {
__ str_double(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -630,7 +622,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ str_float(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -646,14 +638,14 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for float load
if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
if (cbuf && !is_memoryfp(offset)) {
if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
// Further check for aligned-adjacent pair, so we can use a double store
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
if (cbuf) {
if (masm) {
__ ldr_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -663,7 +655,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ ldr_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -683,7 +675,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
assert(src_second_rc == rc_int && dst_second_rc == rc_float, "unsupported");
if (cbuf) {
if (masm) {
__ fmdrr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
} else if (!do_size) {
@ -693,7 +685,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ fmsr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -712,7 +704,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
assert(src_second_rc == rc_float && dst_second_rc == rc_int, "unsupported");
if (cbuf) {
if (masm) {
__ fmrrd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -722,7 +714,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
if (cbuf) {
if (masm) {
__ fmrs(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@ -746,7 +738,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// register (else it's a hi-bits-to-hi-bits copy which should have
// happened already as part of a 64-bit move)
if (src_second_rc == rc_int && dst_second_rc == rc_int) {
if (cbuf) {
if (masm) {
__ mov(reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
} else if (!do_size) {
@ -763,11 +755,11 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
int offset = ra_->reg2offset(dst_second);
if (cbuf && !is_memoryP(offset)) {
if (masm && !is_memoryP(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (cbuf) {
if (masm) {
__ str(reg_to_register_object(Matcher::_regEncode[src_second]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -782,11 +774,11 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for high word integer load
if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
int offset = ra_->reg2offset(src_second);
if (cbuf && !is_memoryP(offset)) {
if (masm && !is_memoryP(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (cbuf) {
if (masm) {
__ ldr(reg_to_register_object(Matcher::_regEncode[dst_second]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@ -804,12 +796,12 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
#ifndef PRODUCT
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
implementation(nullptr, ra_, false, st );
implementation( nullptr, ra_, false, st );
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation( &cbuf, ra_, false, nullptr );
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation( masm, ra_, false, nullptr );
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -823,8 +815,7 @@ void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
C2_MacroAssembler _masm(&cbuf);
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc * ) const {
for(int i = 0; i < _count; i += 1) {
__ nop();
}
@ -844,8 +835,7 @@ void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
Register dst = reg_to_register_object(reg);
@ -875,8 +865,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ ic_check(InteriorEntryAlignment);
}
@ -888,9 +877,7 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
C2_MacroAssembler _masm(&cbuf);
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -909,11 +896,9 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
return offset;
}
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything can be live.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -1208,13 +1193,25 @@ bool maybe_far_call(const MachCallNode *n) {
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
// Set instruction mark in MacroAssembler. This is used only in
// instructions that emit bytes directly to the CodeBuffer wraped
// in the MacroAssembler. Should go away once all "instruct" are
// patched to emit bytes only using methods in MacroAssembler.
enc_class SetInstMark %{
__ set_inst_mark();
%}
enc_class ClearInstMark %{
__ clear_inst_mark();
%}
enc_class call_epilog %{
// nothing
%}
enc_class Java_To_Runtime (method meth) %{
// CALL directly to the runtime
emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
emit_call_reloc(masm, as_MachCall(), $meth, runtime_call_Relocation::spec());
%}
enc_class Java_Static_Call (method meth) %{
@ -1222,15 +1219,15 @@ encode %{
// who we intended to call.
if ( !_method) {
emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
emit_call_reloc(masm, as_MachCall(), $meth, runtime_call_Relocation::spec());
} else {
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
emit_call_reloc(masm, as_MachCall(), $meth, rspec);
// Emit stubs for static call.
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(masm);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -1240,39 +1237,35 @@ encode %{
enc_class save_last_PC %{
// preserve mark
address mark = cbuf.insts()->mark();
debug_only(int off0 = cbuf.insts_size());
C2_MacroAssembler _masm(&cbuf);
address mark = __ inst_mark();
debug_only(int off0 = __ offset());
int ret_addr_offset = as_MachCall()->ret_addr_offset();
__ adr(LR, mark + ret_addr_offset);
__ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
debug_only(int off1 = cbuf.insts_size());
debug_only(int off1 = __ offset());
assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
// restore mark
cbuf.insts()->set_mark(mark);
__ set_inst_mark(mark);
%}
enc_class preserve_SP %{
// preserve mark
address mark = cbuf.insts()->mark();
debug_only(int off0 = cbuf.insts_size());
C2_MacroAssembler _masm(&cbuf);
address mark = __ inst_mark();
debug_only(int off0 = __ offset());
// FP is preserved across all calls, even compiled calls.
// Use it to preserve SP in places where the callee might change the SP.
__ mov(Rmh_SP_save, SP);
debug_only(int off1 = cbuf.insts_size());
debug_only(int off1 = __ offset());
assert(off1 - off0 == 4, "correct size prediction");
// restore mark
cbuf.insts()->set_mark(mark);
__ set_inst_mark(mark);
%}
enc_class restore_SP %{
C2_MacroAssembler _masm(&cbuf);
__ mov(SP, Rmh_SP_save);
%}
enc_class Java_Dynamic_Call (method meth) %{
C2_MacroAssembler _masm(&cbuf);
Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
assert(R8_ic_reg == Ricklass, "should be");
__ set_inst_mark();
@ -1281,9 +1274,9 @@ encode %{
address virtual_call_oop_addr = __ inst_mark();
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
emit_call_reloc(cbuf, as_MachCall(), $meth, RelocationHolder::none);
emit_call_reloc(masm, as_MachCall(), $meth, RelocationHolder::none);
%}
enc_class LdReplImmI(immI src, regD dst, iRegI tmp, int cnt, int wth) %{
@ -1300,7 +1293,6 @@ encode %{
val |= (val << bit_width);
}
}
C2_MacroAssembler _masm(&cbuf);
if (val == -1) {
__ mvn($tmp$$Register, 0);
@ -1317,7 +1309,6 @@ encode %{
// Replicate float con 2 times and pack into vector (8 bytes) in regD.
float fval = $src$$constant;
int val = *((int*)&fval);
C2_MacroAssembler _masm(&cbuf);
if (val == -1) {
__ mvn($tmp$$Register, 0);
@ -1332,7 +1323,6 @@ encode %{
enc_class enc_String_Compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result, iRegI tmp1, iRegI tmp2) %{
Label Ldone, Lloop;
C2_MacroAssembler _masm(&cbuf);
Register str1_reg = $str1$$Register;
Register str2_reg = $str2$$Register;
@ -1424,7 +1414,6 @@ encode %{
enc_class enc_String_Equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2) %{
Label Lchar, Lchar_loop, Ldone, Lequal;
C2_MacroAssembler _masm(&cbuf);
Register str1_reg = $str1$$Register;
Register str2_reg = $str2$$Register;
@ -1486,7 +1475,6 @@ encode %{
enc_class enc_Array_Equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result) %{
Label Ldone, Lloop, Lequal;
C2_MacroAssembler _masm(&cbuf);
Register ary1_reg = $ary1$$Register;
Register ary2_reg = $ary2$$Register;
@ -8847,7 +8835,7 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static ==> " %}
ins_encode( Java_Static_Call( meth ), call_epilog );
ins_encode( SetInstMark, Java_Static_Call( meth ), call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@ -8861,7 +8849,7 @@ instruct CallStaticJavaHandle( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static/MethodHandle ==> " %}
ins_encode( preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog );
ins_encode( SetInstMark, preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@ -8873,7 +8861,7 @@ instruct CallDynamicJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "MOV_OOP (empty),R_R8\n\t"
"CALL,dynamic ; NOP ==> " %}
ins_encode( Java_Dynamic_Call( meth ), call_epilog );
ins_encode( SetInstMark, Java_Dynamic_Call( meth ), call_epilog, ClearInstMark );
ins_pipe(call);
%}
@ -8883,8 +8871,8 @@ instruct CallRuntimeDirect(method meth) %{
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL,runtime" %}
ins_encode( Java_To_Runtime( meth ),
call_epilog );
ins_encode( SetInstMark, Java_To_Runtime( meth ),
call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@ -8895,8 +8883,8 @@ instruct CallLeafDirect(method meth) %{
ins_cost(CALL_COST);
format %{ "CALL,runtime leaf" %}
// TODO: need save_last_PC here?
ins_encode( Java_To_Runtime( meth ),
call_epilog );
ins_encode( SetInstMark, Java_To_Runtime( meth ),
call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@ -8907,8 +8895,8 @@ instruct CallLeafNoFPDirect(method meth) %{
ins_cost(CALL_COST);
format %{ "CALL,runtime leaf nofp" %}
// TODO: need save_last_PC here?
ins_encode( Java_To_Runtime( meth ),
call_epilog );
ins_encode( SetInstMark, Java_To_Runtime( meth ),
call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}

View File

@ -34,20 +34,18 @@
// ----------------------------------------------------------------------------
#if COMPILER2_OR_JVMCI
#define __ _masm.
#define __ masm->
// emit call stub, compiled java to interpreter
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), R9
// b -1
if (mark == nullptr) {
mark = cbuf.insts_mark(); // get mark within main instrs section
mark = __ inst_mark(); // get mark within main instrs section
}
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed

View File

@ -77,18 +77,14 @@
// Usage of r1 and r2 in the stubs allows to distinguish them.
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
#ifdef COMPILER2
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
mark = cbuf.insts_mark();
mark = __ inst_mark();
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
// Start the stub.
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {

View File

@ -44,7 +44,7 @@ instruct compareAndSwapP_shenandoah(iRegIdst res, indirect mem, iRegPsrc oldval,
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@ -65,7 +65,7 @@ instruct compareAndSwapN_shenandoah(iRegIdst res, indirect mem, iRegNsrc oldval,
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@ -86,7 +86,7 @@ instruct compareAndSwapP_acq_shenandoah(iRegIdst res, indirect mem, iRegPsrc old
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@ -112,7 +112,7 @@ instruct compareAndSwapN_acq_shenandoah(iRegIdst res, indirect mem, iRegNsrc old
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@ -137,7 +137,7 @@ instruct compareAndExchangeP_shenandoah(iRegPdst res, indirect mem, iRegPsrc old
format %{ "CMPXCHGD $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@ -157,7 +157,7 @@ instruct compareAndExchangeN_shenandoah(iRegNdst res, indirect mem, iRegNsrc old
format %{ "CMPXCHGD $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@ -177,7 +177,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPdst res, indirect mem, iRegPsrc
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@ -202,7 +202,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNdst res, indirect mem, iRegNsrc
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
&_masm,
masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2021 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -32,7 +32,7 @@ source_hpp %{
source %{
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
@ -45,14 +45,14 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
}
static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
static void x_compare_and_swap(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp_xchg, Register tmp_mask,
bool weak, bool acquire) {
@ -70,7 +70,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
__ beq(CCR0, skip_barrier);
// CAS must have failed because pointer in memory is bad.
x_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
x_load_barrier_slow_path(masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak);
@ -89,7 +89,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
}
}
static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
static void x_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval, Register tmp,
bool weak, bool acquire) {
// z-specific load barrier requires strong CAS operations.
@ -104,7 +104,7 @@ static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
__ and_(tmp, tmp, res);
__ beq(CCR0, skip_barrier);
x_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);
x_load_barrier_slow_path(masm, node, Address(mem), res, tmp);
__ cmpxchgd(CCR0, res, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak);
@ -138,7 +138,7 @@ instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
ins_encode %{
assert($mem$$index == 0, "sanity");
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
%}
ins_pipe(pipe_class_default);
%}
@ -156,7 +156,7 @@ instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
format %{ "LD acq $dst, $mem" %}
ins_encode %{
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
@ -175,7 +175,7 @@ instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(_masm, this,
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, false /* acquire */);
@ -193,7 +193,7 @@ instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(_masm, this,
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, true /* acquire */);
@ -211,7 +211,7 @@ instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(_masm, this,
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, false /* acquire */);
@ -229,7 +229,7 @@ instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
x_compare_and_swap(_masm, this,
x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, true /* acquire */);
@ -250,7 +250,7 @@ instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
x_compare_and_exchange(_masm, this,
x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, false /* acquire */);
%}
@ -270,7 +270,7 @@ instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
x_compare_and_exchange(_masm, this,
x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, true /* acquire */);
%}
@ -286,7 +286,7 @@ instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
x_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
x_load_barrier(masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ isync();

View File

@ -34,7 +34,7 @@ source %{
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_color(MacroAssembler& _masm, Register dst, Register src) {
static void z_color(MacroAssembler* masm, Register dst, Register src) {
assert_different_registers(dst, src);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
__ li(dst, barrier_Relocation::unpatched); // Load color bits.
@ -47,55 +47,55 @@ static void z_color(MacroAssembler& _masm, Register dst, Register src) {
}
}
static void z_uncolor(MacroAssembler& _masm, Register ref) {
static void z_uncolor(MacroAssembler* masm, Register ref) {
__ srdi(ref, ref, ZPointerLoadShift);
}
static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong) {
static void check_color(MacroAssembler* masm, Register ref, bool on_non_strong) {
int relocFormat = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
: ZBarrierRelocationFormatLoadBadMask;
__ relocate(barrier_Relocation::spec(), relocFormat);
__ andi_(R0, ref, barrier_Relocation::unpatched);
}
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
z_uncolor(_masm, ref);
z_uncolor(masm, ref);
} else {
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
check_color(_masm, ref, on_non_strong);
check_color(masm, ref, on_non_strong);
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
z_uncolor(_masm, ref);
z_uncolor(masm, ref);
__ bind(*stub->continuation());
}
}
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
z_color(_masm, rnew_zpointer, rnew_zaddress);
z_color(masm, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(&_masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
bs_asm->store_barrier_fast(masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
static void z_compare_and_swap(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp1, Register tmp2, bool acquire) {
Register rold_zpointer = tmp1, rnew_zpointer = tmp2;
z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
z_color(_masm, rold_zpointer, oldval);
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
z_color(masm, rold_zpointer, oldval);
__ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
false /* we could support weak, but benefit is questionable */);
@ -111,17 +111,17 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
}
}
static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
static void z_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp, bool acquire) {
Register rold_zpointer = R0, rnew_zpointer = tmp;
z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
z_color(_masm, rold_zpointer, oldval);
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
z_color(masm, rold_zpointer, oldval);
__ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
false /* we could support weak, but benefit is questionable */);
z_uncolor(_masm, res);
z_uncolor(masm, res);
if (acquire) {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@ -149,7 +149,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
ins_encode %{
assert($mem$$index == 0, "sanity");
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
z_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
%}
ins_pipe(pipe_class_default);
%}
@ -167,7 +167,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
format %{ "LD acq $dst, $mem" %}
ins_encode %{
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
z_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
@ -186,7 +186,7 @@ instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
ins_cost(2 * MEMORY_REF_COST);
format %{ "std $mem, $src\t# ptr" %}
ins_encode %{
z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */);
z_store_barrier(masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */);
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
%}
ins_pipe(pipe_class_default);
@ -200,7 +200,7 @@ instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0)
ins_cost(MEMORY_REF_COST);
format %{ "std $mem, null\t# ptr" %}
ins_encode %{
z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */);
z_store_barrier(masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */);
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
%}
ins_pipe(pipe_class_default);
@ -217,7 +217,7 @@ instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
z_compare_and_swap(_masm, this,
z_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false /* acquire */);
@ -236,7 +236,7 @@ instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
z_compare_and_swap(_masm, this,
z_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true /* acquire */);
@ -257,7 +257,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
z_compare_and_exchange(_masm, this,
z_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* acquire */);
%}
@ -277,7 +277,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
z_compare_and_exchange(_masm, this,
z_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
true /* acquire */);
%}
@ -293,9 +293,9 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
Register rnew_zpointer = $tmp$$Register, result = $res$$Register;
z_store_barrier(_masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */);
z_store_barrier(masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */);
__ getandsetd(result, rnew_zpointer, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
z_uncolor(_masm, result);
z_uncolor(masm, result);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ isync();

View File

@ -1079,7 +1079,7 @@ bool followed_by_acquire(const Node *load) {
return false;
}
#define __ _masm.
#define __ masm->
// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true
@ -1189,8 +1189,7 @@ int cmprb_Whitespace_reg_reg_prefixedNode::compute_padding(int current_offset) c
//=============================================================================
// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_break(C2_MacroAssembler *masm) {
__ illtrap();
}
@ -1200,8 +1199,8 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_break(cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
emit_break(masm);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
@ -1210,14 +1209,13 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
void emit_nop(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_nop(C2_MacroAssembler *masm) {
__ nop();
}
static inline void emit_long(CodeBuffer &cbuf, int value) {
*((int*)(cbuf.insts_end())) = value;
cbuf.set_insts_end(cbuf.insts_end() + BytesPerInstWord);
static inline void emit_long(C2_MacroAssembler *masm, int value) {
*((int*)(__ pc())) = value;
__ set_inst_end(__ pc() + BytesPerInstWord);
}
//=============================================================================
@ -1237,7 +1235,7 @@ class CallStubImpl {
public:
// Emit call stub, compiled java to interpreter.
static void emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
static void emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset);
// Size of call trampoline stub.
// This doesn't need to be accurate to the byte, but it
@ -1268,7 +1266,7 @@ source %{
// load the call target from the constant pool
// branch via CTR (LR/link still points to the call-site above)
void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset) {
address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
if (stub == nullptr) {
ciEnv::current()->record_out_of_memory_failure();
@ -1299,7 +1297,7 @@ typedef struct {
// - Add a relocation at the branch-and-link instruction.
// - Emit a branch-and-link.
// - Remember the return pc offset.
EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler &_masm, address entry_point, relocInfo::relocType rtype) {
EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler *masm, address entry_point, relocInfo::relocType rtype) {
EmitCallOffsets offsets = { -1, -1 };
const int start_offset = __ offset();
offsets.insts_call_instruction_offset = __ offset();
@ -1316,7 +1314,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler &_masm, address
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype);
@ -1366,7 +1364,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
nodes->push(m2);
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Is postalloc expanded.
ShouldNotReachHere();
}
@ -1404,9 +1402,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
const long framesize = C->output()->frame_size_in_bytes();
assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
@ -1556,10 +1553,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm, push_frame_temp);
bs->nmethod_entry_barrier(masm, push_frame_temp);
}
C->output()->set_frame_complete(cbuf.insts_size());
C->output()->set_frame_complete(__ offset());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@ -1588,9 +1585,8 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
const long framesize = ((long)C->output()->frame_slots()) << LogBytesPerInt;
assert(framesize >= 0, "negative frame-size?");
@ -1668,7 +1664,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int reg, int offset,
static int ld_st_helper(C2_MacroAssembler *masm, const char *op_str, uint opcode, int reg, int offset,
bool do_print, Compile* C, outputStream *st) {
assert(opcode == Assembler::LD_OPCODE ||
@ -1681,12 +1677,12 @@ static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int r
opcode == Assembler::STFS_OPCODE,
"opcode not supported");
if (cbuf) {
if (masm) {
int d =
(Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
: Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
emit_long(*cbuf, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
emit_long(masm, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
}
#ifndef PRODUCT
else if (do_print) {
@ -1699,7 +1695,7 @@ static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int r
return 4; // size
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
@ -1729,8 +1725,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
__ ld(R0, src_offset, R1_SP);
__ std(R0, dst_offset, R1_SP);
__ ld(R0, src_offset+8, R1_SP);
@ -1742,8 +1737,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
int dst_offset = ra_->reg2offset(dst_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
__ addi(R0, R1_SP, dst_offset);
__ stxvd2x(Rsrc, R0);
}
@ -1753,8 +1747,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
int src_offset = ra_->reg2offset(src_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
__ addi(R0, R1_SP, src_offset);
__ lxvd2x(Rdst, R0);
}
@ -1764,8 +1757,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
__ xxlor(Rdst, Rsrc, Rsrc);
}
size += 4;
@ -1784,13 +1776,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!cbuf && !do_size) st->print("\n\t");
size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!masm && !do_size) st->print("\n\t");
size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!cbuf && !do_size) st->print("\n\t");
size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!masm && !do_size) st->print("\n\t");
size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
}
return size;
}
@ -1808,8 +1800,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
size = (Rsrc != Rdst) ? 4 : 0;
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
if (size) {
__ mr(Rdst, Rsrc);
}
@ -1832,9 +1823,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
@ -1845,17 +1836,16 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
// Check for float reg-reg copy.
if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
__ fmr(Rdst, Rsrc);
@ -1874,9 +1864,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
size += ld_st_helper(masm, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
@ -1887,9 +1877,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
size += ld_st_helper(masm, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
size += ld_st_helper(masm, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
@ -1914,8 +1904,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, nullptr);
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -1928,8 +1918,7 @@ void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
C2_MacroAssembler _masm(&cbuf);
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *) const {
// _count contains the number of nops needed for padding.
for (int i = 0; i < _count; i++) {
__ nop();
@ -1949,9 +1938,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
@ -1974,10 +1961,8 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
// Argument is valid and klass is as expected, continue.
}
@ -1997,8 +1982,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// The exception_handler is a b64_patchable.
@ -2023,9 +2008,7 @@ public:
source %{
int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2043,9 +2026,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
C2_MacroAssembler _masm(&cbuf);
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2421,27 +2402,23 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
// needs for encoding need to be specified.
encode %{
enc_class enc_unimplemented %{
C2_MacroAssembler _masm(&cbuf);
__ unimplemented("Unimplemented mach node encoding in AD file.", 13);
%}
enc_class enc_untested %{
#ifdef ASSERT
C2_MacroAssembler _masm(&cbuf);
__ untested("Untested mach node encoding in AD file.");
#else
#endif
%}
enc_class enc_lbz(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lbz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lbz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@ -2449,16 +2426,12 @@ encode %{
%}
enc_class enc_lhz(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lhz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lhz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@ -2466,16 +2439,12 @@ encode %{
%}
enc_class enc_lwz(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lwz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lwz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@ -2483,7 +2452,6 @@ encode %{
%}
enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@ -2492,7 +2460,6 @@ encode %{
// Load acquire.
enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@ -2502,14 +2469,11 @@ encode %{
%}
enc_class enc_lfd(RegF dst, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
%}
enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc) %{
C2_MacroAssembler _masm(&cbuf);
int toc_offset = 0;
address const_toc_addr;
@ -2531,9 +2495,6 @@ encode %{
%}
enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src) %{
C2_MacroAssembler _masm(&cbuf);
if (!ra_->C->output()->in_scratch_emit_size()) {
address const_toc_addr;
// Create a non-oop constant, no relocation needed.
@ -2765,8 +2726,6 @@ encode %{
%}
enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc) %{
C2_MacroAssembler _masm(&cbuf);
int toc_offset = 0;
intptr_t val = $src$$constant;
@ -2799,8 +2758,6 @@ encode %{
%}
enc_class enc_load_long_constP_hi(iRegLdst dst, immP src, iRegLdst toc) %{
C2_MacroAssembler _masm(&cbuf);
if (!ra_->C->output()->in_scratch_emit_size()) {
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
@ -2935,13 +2892,11 @@ encode %{
%}
enc_class enc_stw(iRegIsrc src, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stw($src$$Register, Idisp, $mem$$base$$Register);
%}
enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@ -2949,13 +2904,11 @@ encode %{
%}
enc_class enc_stfs(RegF src, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stfs($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}
enc_class enc_stfd(RegF src, memory mem) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}
@ -3154,8 +3107,6 @@ encode %{
%}
enc_class enc_cmove_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src, cmpOp cmp) %{
C2_MacroAssembler _masm(&cbuf);
int cc = $cmp$$cmpcode;
int flags_reg = $crx$$reg;
Label done;
@ -3167,8 +3118,6 @@ encode %{
%}
enc_class enc_cmove_imm(iRegIdst dst, flagsRegSrc crx, immI16 src, cmpOp cmp) %{
C2_MacroAssembler _masm(&cbuf);
Label done;
assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
// Branch if not (cmp crx).
@ -3180,14 +3129,10 @@ encode %{
// This enc_class is needed so that scheduler gets proper
// input mapping for latency computation.
enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
C2_MacroAssembler _masm(&cbuf);
__ andc($dst$$Register, $src1$$Register, $src2$$Register);
%}
enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
C2_MacroAssembler _masm(&cbuf);
Label done;
__ cmpwi($crx$$CondRegister, $src$$Register, 0);
__ li($dst$$Register, $zero$$constant);
@ -3197,9 +3142,6 @@ encode %{
%}
enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
C2_MacroAssembler _masm(&cbuf);
Label done;
__ cmpdi($crx$$CondRegister, $src$$Register, 0);
__ li($dst$$Register, $zero$$constant);
@ -3209,8 +3151,6 @@ encode %{
%}
enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL mem ) %{
C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
Label done;
__ bso($crx$$CondRegister, done);
@ -3219,8 +3159,6 @@ encode %{
%}
enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
C2_MacroAssembler _masm(&cbuf);
Label done;
__ bso($crx$$CondRegister, done);
__ mffprd($dst$$Register, $src$$FloatRegister);
@ -3228,8 +3166,6 @@ encode %{
%}
enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label d; // dummy
__ bind(d);
Label* p = ($lbl$$label);
@ -3257,8 +3193,6 @@ encode %{
enc_class enc_bc_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// The scheduler doesn't know about branch shortening, so we set the opcode
// to ppc64Opcode_bc in order to hide this detail from the scheduler.
C2_MacroAssembler _masm(&cbuf);
Label d; // dummy
__ bind(d);
Label* p = ($lbl$$label);
@ -3333,7 +3267,6 @@ encode %{
// Fake operand dst needed for PPC scheduler.
assert($dst$$constant == 0x0, "dst must be 0x0");
C2_MacroAssembler _masm(&cbuf);
// Mark the code position where the load from the safepoint
// polling page was emitted as relocInfo::poll_type.
__ relocate(relocInfo::poll_type);
@ -3387,13 +3320,11 @@ encode %{
//
// Usage of r1 and r2 in the stubs allows to distinguish them.
enc_class enc_java_static_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
address entry_point = (address)$meth$$method;
if (!_method) {
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
emit_call_with_trampoline_stub(_masm, entry_point, relocInfo::runtime_call_type);
emit_call_with_trampoline_stub(masm, entry_point, relocInfo::runtime_call_type);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
} else {
// Remember the offset not the address.
@ -3413,9 +3344,9 @@ encode %{
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, start_offset);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
__ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index));
@ -3423,11 +3354,12 @@ encode %{
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
cbuf.set_insts_mark();
__ set_inst_mark();
__ bl(__ pc()); // Emits a relocation.
// The stub for call to interpreter.
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(masm);
__ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -3438,9 +3370,6 @@ encode %{
// Second node of expanded dynamic call - the call.
enc_class enc_java_dynamic_call_sched(method meth) %{
C2_MacroAssembler _masm(&cbuf);
if (!ra_->C->output()->in_scratch_emit_size()) {
// Create a call trampoline stub for the given method.
const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
@ -3450,7 +3379,7 @@ encode %{
return;
}
const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
CallStubImpl::emit_trampoline_stub(masm, entry_point_const_toc_offset, __ offset());
if (ra_->C->env()->failing()) { return; } // Code cache may be full.
// Build relocation at call site with ic position as data.
@ -3466,7 +3395,7 @@ encode %{
const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
"should be load from TOC");
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
}
@ -3541,7 +3470,6 @@ encode %{
// Toc is only passed so that it can be used in ins_encode statement.
// In the code we have to use $constanttablebase.
enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
C2_MacroAssembler _masm(&cbuf);
int start_offset = __ offset();
Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
@ -3564,7 +3492,7 @@ encode %{
// CALL to fixup routine. Fixup routine uses ScopeDesc info
// to determine who we intended to call.
__ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
emit_call_with_trampoline_stub(_masm, (address)$meth$$method, relocInfo::none);
emit_call_with_trampoline_stub(masm, (address)$meth$$method, relocInfo::none);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
"Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
@ -3595,8 +3523,6 @@ encode %{
// a runtime call
enc_class enc_java_to_runtime_call (method meth) %{
C2_MacroAssembler _masm(&cbuf);
const address start_pc = __ pc();
#if defined(ABI_ELFv2)
@ -3630,7 +3556,6 @@ encode %{
// This enc_class is needed so that scheduler gets proper
// input mapping for latency computation.
enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
C2_MacroAssembler _masm(&cbuf);
__ mtctr($src$$Register);
%}
@ -4085,7 +4010,7 @@ operand immN() %{
interface(CONST_INTER);
%}
// Null Pointer Immediate
// nullptr Pointer Immediate
operand immN_0() %{
predicate(n->get_narrowcon() == 0);
match(ConN);
@ -14581,8 +14506,9 @@ instruct RethrowException() %{
format %{ "Jmp rethrow_stub" %}
ins_encode %{
cbuf.set_insts_mark();
__ set_inst_mark();
__ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
__ clear_inst_mark();
%}
ins_pipe(pipe_class_call);
%}

View File

@ -1760,8 +1760,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(L_exit);
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@ -1853,8 +1852,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ blr();
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, call_pc);
stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
guarantee(stub != nullptr, "no space for static stub");
}

View File

@ -35,23 +35,19 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
precond(__ code()->stubs()->start() != badAddress);
precond(__ code()->stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// mv xmethod, 0
// jalr -4 # to self
if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
mark = __ inst_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset();
if (base == nullptr) {

View File

@ -41,7 +41,7 @@ instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -62,7 +62,7 @@ instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -84,7 +84,7 @@ instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -106,7 +106,7 @@ instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -126,7 +126,7 @@ instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@ -146,7 +146,7 @@ instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@ -168,7 +168,7 @@ instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldva
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -189,7 +189,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@ -210,7 +210,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register);
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@ -230,7 +230,7 @@ instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -253,7 +253,7 @@ instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN ol
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@ -276,7 +276,7 @@ instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP ol
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@ -32,7 +32,7 @@ source_hpp %{
source %{
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
@ -43,7 +43,7 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ j(*stub->entry());
__ bind(*stub->continuation());
@ -65,7 +65,7 @@ instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
ins_encode %{
const Address ref_addr (as_Register($mem$$base), $mem$$disp);
__ ld($dst$$Register, ref_addr);
x_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
x_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
@ -94,7 +94,7 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
@ -128,7 +128,7 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
@ -157,7 +157,7 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
@ -185,7 +185,7 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
@ -206,7 +206,7 @@ instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@ -223,7 +223,7 @@ instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp)
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}

View File

@ -33,7 +33,7 @@ source_hpp %{
source %{
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src, Register tmp) {
static void z_color(MacroAssembler* masm, const MachNode* node, Register dst, Register src, Register tmp) {
assert_different_registers(dst, tmp);
__ relocate(barrier_Relocation::spec(), [&] {
@ -43,11 +43,11 @@ static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, R
__ orr(dst, dst, tmp);
}
static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ srli(ref, ref, ZPointerLoadShift);
}
static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong, Register result) {
static void check_color(MacroAssembler* masm, Register ref, bool on_non_strong, Register result) {
int format = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
: ZBarrierRelocationFormatLoadBadMask;
__ relocate(barrier_Relocation::spec(), [&] {
@ -56,35 +56,35 @@ static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong,
__ andr(result, ref, result);
}
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (node->barrier_data() == ZBarrierElided) {
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
return;
}
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
Label good;
check_color(_masm, ref, on_non_strong, tmp);
check_color(masm, ref, on_non_strong, tmp);
__ beqz(tmp, good);
__ j(*stub->entry());
__ bind(good);
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
if (node->barrier_data() == ZBarrierElided) {
z_color(_masm, node, rnew_zpointer, rnew_zaddress, tmp);
z_color(masm, node, rnew_zpointer, rnew_zaddress, tmp);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
%}
@ -103,7 +103,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
ins_encode %{
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
__ ld($dst$$Register, ref_addr);
z_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register);
z_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register);
%}
ins_pipe(iload_reg_mem);
@ -120,7 +120,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2)
format %{ "sd $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp1$$Register, $tmp2$$Register, false /* is_atomic */);
z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp1$$Register, $tmp2$$Register, false /* is_atomic */);
__ sd($tmp1$$Register, ref_addr);
%}
ins_pipe(pipe_serial);
@ -141,8 +141,8 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
%}
@ -164,8 +164,8 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
%}
@ -185,10 +185,10 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
z_uncolor(_masm, this, $res$$Register);
z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -207,10 +207,10 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
z_uncolor(_masm, this, $res$$Register);
z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@ -226,9 +226,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
ins_encode %{
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
__ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
z_uncolor(_masm, this, $prev$$Register);
z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
@ -244,9 +244,9 @@ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp)
format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
ins_encode %{
z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
__ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
z_uncolor(_masm, this, $prev$$Register);
z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
%}

View File

@ -1060,8 +1060,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_branch_size();
@ -1207,7 +1207,7 @@ bool needs_acquiring_load_reserved(const Node *n)
// so we can just return true here
return true;
}
#define __ _masm.
#define __ masm->
// advance declarations for helper functions to convert register
// indices to register objects
@ -1291,8 +1291,7 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ ebreak();
}
@ -1308,9 +1307,8 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
C2_MacroAssembler _masm(&cbuf);
Assembler::CompressibleRegion cr(&_masm); // nops shall be 2-byte under RVC for alignment purposes.
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
Assembler::CompressibleRegion cr(masm); // nops shall be 2-byte under RVC for alignment purposes.
for (int i = 0; i < _count; i++) {
__ nop();
}
@ -1332,7 +1330,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@ -1376,10 +1374,9 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and fp
const int framesize = C->output()->frame_size_in_bytes();
@ -1387,7 +1384,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
{
Assembler::IncompressibleRegion ir(&_masm); // keep the nop as 4 bytes for patching.
Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
MacroAssembler::assert_alignment(__ pc());
__ nop(); // 4 bytes
}
@ -1431,7 +1428,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
}
}
@ -1439,7 +1436,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Unimplemented();
}
C->output()->set_frame_complete(cbuf.insts_size());
C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@ -1490,10 +1487,9 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
assert_cond(C != nullptr);
int framesize = C->output()->frame_size_in_bytes();
@ -1567,7 +1563,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
@ -1601,8 +1597,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (bottom_type()->isa_vect() != nullptr) {
uint ireg = ideal_reg();
if (ireg == Op_VecA && cbuf) {
C2_MacroAssembler _masm(cbuf);
if (ireg == Op_VecA && masm) {
int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack to stack
@ -1620,8 +1615,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
} else {
ShouldNotReachHere();
}
} else if (bottom_type()->isa_vectmask() && cbuf) {
C2_MacroAssembler _masm(cbuf);
} else if (bottom_type()->isa_vectmask() && masm) {
int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack to stack
@ -1640,8 +1634,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
ShouldNotReachHere();
}
}
} else if (cbuf != nullptr) {
C2_MacroAssembler _masm(cbuf);
} else if (masm != nullptr) {
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
@ -1753,8 +1746,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, nullptr);
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -1773,9 +1766,8 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see BoxLockNode::size()
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Assembler::IncompressibleRegion ir(masm); // Fixed length: see BoxLockNode::size()
assert_cond(ra_ != nullptr);
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
@ -1820,10 +1812,9 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
// Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
@ -1842,13 +1833,12 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// auipc t0, #exception_blob_entry_point
// jr (offset)t0
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -1862,11 +1852,8 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2201,14 +2188,12 @@ encode %{
// BEGIN Non-volatile memory access
enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
C2_MacroAssembler _masm(&cbuf);
int64_t con = (int64_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, con);
%}
enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr || con == (address)1) {
@ -2227,18 +2212,15 @@ encode %{
%}
enc_class riscv_enc_mov_p1(iRegP dst) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, 1);
%}
enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
C2_MacroAssembler _masm(&cbuf);
__ load_byte_map_base($dst$$Register);
%}
enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@ -2251,13 +2233,11 @@ encode %{
%}
enc_class riscv_enc_mov_zero(iRegNorP dst) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, zr);
%}
enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@ -2270,42 +2250,36 @@ encode %{
%}
enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
@ -2314,13 +2288,11 @@ encode %{
// compare and branch instruction encodings
enc_class riscv_enc_j(label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* L = $lbl$$label;
__ j(*L);
%}
enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* L = $lbl$$label;
switch ($cmp$$cmpcode) {
case(BoolTest::ge):
@ -2344,7 +2316,6 @@ encode %{
Label miss;
Label done;
C2_MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
nullptr, &miss);
if ($primary) {
@ -2363,8 +2334,7 @@ encode %{
%}
enc_class riscv_enc_java_static_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
address addr = (address)$meth$$method;
address call = nullptr;
@ -2382,7 +2352,7 @@ encode %{
__ nop();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
@ -2394,10 +2364,10 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
__ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
} else {
// Emit stub for static call
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -2409,9 +2379,8 @@ encode %{
%}
enc_class riscv_enc_java_dynamic_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
int method_index = resolved_method_index(cbuf);
Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
int method_index = resolved_method_index(masm);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2422,7 +2391,6 @@ encode %{
%}
enc_class riscv_enc_call_epilog() %{
C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
__ call_Unimplemented();
@ -2430,8 +2398,7 @@ encode %{
%}
enc_class riscv_enc_java_to_runtime(method meth) %{
C2_MacroAssembler _masm(&cbuf);
Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
// some calls to generated routines (arraycopy code) are scheduled
// by C2 as runtime calls. if so we can call them using a jr (they
@ -2463,7 +2430,6 @@ encode %{
// arithmetic encodings
enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2471,7 +2437,6 @@ encode %{
%}
enc_class riscv_enc_divuw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2479,7 +2444,6 @@ encode %{
%}
enc_class riscv_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2487,7 +2451,6 @@ encode %{
%}
enc_class riscv_enc_divu(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2495,7 +2458,6 @@ encode %{
%}
enc_class riscv_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2503,7 +2465,6 @@ encode %{
%}
enc_class riscv_enc_moduw(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2511,7 +2472,6 @@ encode %{
%}
enc_class riscv_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2519,7 +2479,6 @@ encode %{
%}
enc_class riscv_enc_modu(iRegI dst, iRegI src1, iRegI src2) %{
C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@ -2527,13 +2486,11 @@ encode %{
%}
enc_class riscv_enc_tail_call(iRegP jump_target) %{
C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
__ jr(target_reg);
%}
enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
// exception oop should be in x10
// ret addr has been popped into ra
@ -2543,12 +2500,10 @@ encode %{
%}
enc_class riscv_enc_rethrow() %{
C2_MacroAssembler _masm(&cbuf);
__ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
%}
enc_class riscv_enc_ret() %{
C2_MacroAssembler _masm(&cbuf);
__ ret();
%}

View File

@ -30,19 +30,19 @@ opclass vmemA(indirect);
source %{
static void loadStore(C2_MacroAssembler masm, bool is_store,
static void loadStore(C2_MacroAssembler* masm, bool is_store,
VectorRegister reg, BasicType bt, Register base,
uint vector_length, Assembler::VectorMask vm = Assembler::unmasked) {
Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
masm.vsetvli_helper(bt, vector_length);
__ vsetvli_helper(bt, vector_length);
if (is_store) {
masm.vsex_v(reg, base, sew, vm);
__ vsex_v(reg, base, sew, vm);
} else {
if (vm == Assembler::v0_t) {
masm.vxor_vv(reg, reg, reg);
__ vxor_vv(reg, reg, reg);
}
masm.vlex_v(reg, base, sew, vm);
__ vlex_v(reg, base, sew, vm);
}
}
@ -108,7 +108,7 @@ instruct loadV(vReg dst, vmemA mem) %{
format %{ "loadV $dst, $mem\t# vector (rvv)" %}
ins_encode %{
VectorRegister dst_reg = as_VectorRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), false, dst_reg,
loadStore(masm, false, dst_reg,
Matcher::vector_element_basic_type(this), as_Register($mem$$base), Matcher::vector_length(this));
%}
ins_pipe(pipe_slow);
@ -120,7 +120,7 @@ instruct storeV(vReg src, vmemA mem) %{
format %{ "storeV $mem, $src\t# vector (rvv)" %}
ins_encode %{
VectorRegister src_reg = as_VectorRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), true, src_reg,
loadStore(masm, true, src_reg,
Matcher::vector_element_basic_type(this, $src), as_Register($mem$$base), Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
@ -3154,7 +3154,7 @@ instruct loadV_masked(vReg dst, vmemA mem, vRegMask_V0 v0) %{
format %{ "loadV_masked $dst, $mem, $v0" %}
ins_encode %{
VectorRegister dst_reg = as_VectorRegister($dst$$reg);
loadStore(C2_MacroAssembler(&cbuf), false, dst_reg,
loadStore(masm, false, dst_reg,
Matcher::vector_element_basic_type(this), as_Register($mem$$base),
Matcher::vector_length(this), Assembler::v0_t);
%}
@ -3166,7 +3166,7 @@ instruct storeV_masked(vReg src, vmemA mem, vRegMask_V0 v0) %{
format %{ "storeV_masked $mem, $src, $v0" %}
ins_encode %{
VectorRegister src_reg = as_VectorRegister($src$$reg);
loadStore(C2_MacroAssembler(&cbuf), true, src_reg,
loadStore(masm, true, src_reg,
Matcher::vector_element_basic_type(this, $src), as_Register($mem$$base),
Matcher::vector_length(this, $src), Assembler::v0_t);
%}

View File

@ -974,8 +974,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ j(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1040,8 +1039,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ jr(x11); // the exception handler
}
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}

View File

@ -37,22 +37,18 @@
// ----------------------------------------------------------------------------
#undef __
#define __ _masm.
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
mark = cbuf.insts_mark();
mark = __ inst_mark();
}
assert(mark != nullptr, "mark must not be null");
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.

View File

@ -584,7 +584,7 @@ source %{
#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")
#endif
#define __ _masm.
#define __ masm->
#define Z_DISP_SIZE Immediate::is_uimm12((long)opnd_array(1)->disp(ra_,this,2)) ? 4 : 6
#define Z_DISP3_SIZE 6
@ -666,14 +666,12 @@ int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
return (12 - current_offset) & 2;
}
void emit_nop(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_nop(C2_MacroAssembler *masm) {
__ z_nop();
}
// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
void emit_break(C2_MacroAssembler *masm) {
__ z_illtrap();
}
@ -683,51 +681,45 @@ void MachBreakpointNode::format(PhaseRegAlloc *, outputStream *os) const {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_break(cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
emit_break(masm);
}
// Returns the emitted-code size of the breakpoint node by delegating to the
// generic MachNode::size() computation (which derives the size from a scratch
// emission) rather than reporting a fixed constant.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
static inline void z_emit16(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
static inline void z_emit16(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 2);
}
static inline void z_emit32(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
static inline void z_emit32(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 4);
}
static inline void z_emit48(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
static inline void z_emit48(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 6);
}
static inline unsigned int z_emit_inst(CodeBuffer &cbuf, long value) {
static inline unsigned int z_emit_inst(C2_MacroAssembler *masm, long value) {
if (value < 0) {
// There obviously has been an unintended sign extension (int->long). Revert it.
value = (long)((unsigned long)((unsigned int)value));
}
C2_MacroAssembler _masm(&cbuf);
int len = __ emit_instruction((unsigned long)value, 0);
return len;
}
// Check effective address (at runtime) for required alignment.
static inline void z_assert_aligned(CodeBuffer &cbuf, int disp, Register index, Register base, int alignment) {
C2_MacroAssembler _masm(&cbuf);
static inline void z_assert_aligned(C2_MacroAssembler *masm, int disp, Register index, Register base, int alignment) {
__ z_lay(Z_R0, disp, index, base);
__ z_nill(Z_R0, alignment-1);
__ z_brc(Assembler::bcondEqual, +3);
__ z_illtrap();
}
int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, relocInfo::relocType rtype,
int emit_call_reloc(C2_MacroAssembler *masm, intptr_t entry_point, relocInfo::relocType rtype,
PhaseRegAlloc* ra_, bool is_native_call = false) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
@ -758,7 +750,7 @@ int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, relocInfo::r
return (ret_off - start_off);
}
static int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, RelocationHolder const& rspec) {
static int emit_call_reloc(C2_MacroAssembler *masm, intptr_t entry_point, RelocationHolder const& rspec) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
unsigned int start_off = __ offset();
@ -790,8 +782,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
// Even with PC-relative TOC addressing, we still need this node.
// Float loads/stores do not support PC-relative addresses.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
Register Rtoc = as_Register(ra_->get_encode(this));
__ load_toc(Rtoc);
}
@ -841,9 +832,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
size_t framesize = C->output()->frame_size_in_bytes();
size_t bangsize = C->output()->bang_size_in_bytes();
@ -892,10 +882,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm);
bs->nmethod_entry_barrier(masm);
}
C->output()->set_frame_complete(cbuf.insts_size());
C->output()->set_frame_complete(__ offset());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@ -921,8 +911,7 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
// If this does safepoint polling, then do it here.
@ -990,15 +979,15 @@ static enum RC rc_class(OptoReg::Name reg) {
}
// Returns size as obtained from z_emit_instr.
static unsigned int z_ld_st_helper(CodeBuffer *cbuf, const char *op_str, unsigned long opcode,
static unsigned int z_ld_st_helper(C2_MacroAssembler *masm, const char *op_str, unsigned long opcode,
int reg, int offset, bool do_print, outputStream *os) {
if (cbuf) {
if (masm) {
if (opcode > (1L<<32)) {
return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 48) |
return z_emit_inst(masm, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 48) |
Assembler::simm20(offset) | Assembler::reg(Z_R0, 12, 48) | Assembler::regz(Z_SP, 16, 48));
} else {
return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 32) |
return z_emit_inst(masm, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 32) |
Assembler::uimm12(offset, 20, 32) | Assembler::reg(Z_R0, 12, 32) | Assembler::regz(Z_SP, 16, 32));
}
}
@ -1011,9 +1000,8 @@ static unsigned int z_ld_st_helper(CodeBuffer *cbuf, const char *op_str, unsigne
return (opcode > (1L << 32)) ? 6 : 4;
}
static unsigned int z_mvc_helper(CodeBuffer *cbuf, int len, int dst_off, int src_off, bool do_print, outputStream *os) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
static unsigned int z_mvc_helper(C2_MacroAssembler *masm, int len, int dst_off, int src_off, bool do_print, outputStream *os) {
if (masm) {
__ z_mvc(dst_off, len-1, Z_SP, src_off, Z_SP);
}
@ -1026,7 +1014,7 @@ static unsigned int z_mvc_helper(CodeBuffer *cbuf, int len, int dst_off, int src
return 6;
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *os) const {
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *os) const {
// Get registers to move.
OptoReg::Name src_hi = ra_->get_reg_second(in(1));
OptoReg::Name src_lo = ra_->get_reg_first(in(1));
@ -1066,17 +1054,17 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
"expected same type of move for high parts");
if (src12 && dst12) {
return z_mvc_helper(cbuf, is64 ? 8 : 4, dst_offset, src_offset, print, os);
return z_mvc_helper(masm, is64 ? 8 : 4, dst_offset, src_offset, print, os);
}
int r0 = Z_R0_num;
if (is64) {
return z_ld_st_helper(cbuf, "LG ", LG_ZOPC, r0, src_offset, print, os) +
z_ld_st_helper(cbuf, "STG ", STG_ZOPC, r0, dst_offset, print, os);
return z_ld_st_helper(masm, "LG ", LG_ZOPC, r0, src_offset, print, os) +
z_ld_st_helper(masm, "STG ", STG_ZOPC, r0, dst_offset, print, os);
}
return z_ld_st_helper(cbuf, "LY ", LY_ZOPC, r0, src_offset, print, os) +
z_ld_st_helper(cbuf, "STY ", STY_ZOPC, r0, dst_offset, print, os);
return z_ld_st_helper(masm, "LY ", LY_ZOPC, r0, src_offset, print, os) +
z_ld_st_helper(masm, "STY ", STY_ZOPC, r0, dst_offset, print, os);
}
// Check for float->int copy. Requires a trip through memory.
@ -1086,8 +1074,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
// Check for integer reg-reg copy.
if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
__ z_lgr(Rdst, Rsrc);
@ -1108,14 +1095,14 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
"expected same type of move for high parts");
if (is64) {
return z_ld_st_helper(cbuf, "STG ", STG_ZOPC, src_lo, dst_offset, print, os);
return z_ld_st_helper(masm, "STG ", STG_ZOPC, src_lo, dst_offset, print, os);
}
// else
mnemo = dst12 ? "ST " : "STY ";
opc = dst12 ? ST_ZOPC : STY_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// Check for integer load
@ -1128,13 +1115,12 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
mnemo = is64 ? "LG " : "LLGF";
opc = is64 ? LG_ZOPC : LLGF_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// Check for float reg-reg copy.
if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
__ z_ldr(Rdst, Rsrc);
@ -1157,13 +1143,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (is64) {
mnemo = dst12 ? "STD " : "STDY ";
opc = dst12 ? STD_ZOPC : STDY_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// else
mnemo = dst12 ? "STE " : "STEY ";
opc = dst12 ? STE_ZOPC : STEY_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// Check for float load.
@ -1174,13 +1160,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (is64) {
mnemo = src12 ? "LD " : "LDY ";
opc = src12 ? LD_ZOPC : LDY_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// else
mnemo = src12 ? "LE " : "LEY ";
opc = src12 ? LE_ZOPC : LEY_ZOPC;
return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// --------------------------------------------------------------------
@ -1216,8 +1202,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, nullptr);
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -1232,12 +1218,10 @@ void MachNopNode::format(PhaseRegAlloc *, outputStream *os) const {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc * ra_) const {
int rem_space = 0;
if (!(ra_->C->output()->in_scratch_emit_size())) {
rem_space = cbuf.insts()->remaining();
rem_space = __ code()->insts()->remaining();
if (rem_space <= _count*2 + 8) {
tty->print("NopNode: _count = %3.3d, remaining space before = %d", _count, rem_space);
}
@ -1249,7 +1233,7 @@ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ra_) const {
if (!(ra_->C->output()->in_scratch_emit_size())) {
if (rem_space <= _count*2 + 8) {
int rem_space2 = cbuf.insts()->remaining();
int rem_space2 = __ code()->insts()->remaining();
tty->print_cr(", after = %d", rem_space2);
}
}
@ -1272,9 +1256,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
#endif
// Take care of the size function, if you make changes here!
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
__ z_lay(as_Register(reg), offset, Z_SP);
@ -1340,9 +1322,8 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// This is Unverified Entry Point
C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
}
@ -1360,8 +1341,8 @@ source_hpp %{ // Header information of the source block.
class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return NativeJump::max_instruction_size();
@ -1399,9 +1380,8 @@ source %{
// 3) The handler will get patched such that it does not jump to the
// exception blob, but to an entry in the deoptimization blob being
// aware of the exception.
int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
Register temp_reg = Z_R1;
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
@ -1422,8 +1402,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
C2_MacroAssembler _masm(&cbuf);
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
@ -1701,13 +1680,11 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack,
// needs for encoding need to be specified.
encode %{
enc_class enc_unimplemented %{
C2_MacroAssembler _masm(&cbuf);
__ unimplemented("Unimplemented mach node encoding in AD file.", 13);
%}
enc_class enc_untested %{
#ifdef ASSERT
C2_MacroAssembler _masm(&cbuf);
__ untested("Untested mach node encoding in AD file.");
#endif
%}
@ -1715,21 +1692,21 @@ encode %{
enc_class z_rrform(iRegI dst, iRegI src) %{
assert((($primary >> 14) & 0x03) == 0, "Instruction format error");
assert( ($primary >> 16) == 0, "Instruction format error");
z_emit16(cbuf, $primary |
z_emit16(masm, $primary |
Assembler::reg($dst$$reg,8,16) |
Assembler::reg($src$$reg,12,16));
%}
enc_class z_rreform(iRegI dst1, iRegI src2) %{
assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst1$$reg,24,32) |
Assembler::reg($src2$$reg,28,32));
%}
enc_class z_rrfform(iRegI dst1, iRegI src2, iRegI src3) %{
assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst1$$reg,24,32) |
Assembler::reg($src2$$reg,28,32) |
Assembler::reg($src3$$reg,16,32));
@ -1737,21 +1714,21 @@ encode %{
enc_class z_riform_signed(iRegI dst, immI16 src) %{
assert((($primary>>30) & 0x03) == 2, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::simm16($src$$constant,16,32));
%}
enc_class z_riform_unsigned(iRegI dst, uimmI16 src) %{
assert((($primary>>30) & 0x03) == 2, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::uimm16($src$$constant,16,32));
%}
enc_class z_rieform_d(iRegI dst1, iRegI src3, immI src2) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst1$$reg,8,48) |
Assembler::reg($src3$$reg,12,48) |
Assembler::simm16($src2$$constant,16,48));
@ -1759,27 +1736,27 @@ encode %{
enc_class z_rilform_signed(iRegI dst, immL32 src) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::simm32($src$$constant,16,48));
%}
enc_class z_rilform_unsigned(iRegI dst, uimmL32 src) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::uimm32($src$$constant,16,48));
%}
enc_class z_rsyform_const(iRegI dst, iRegI src1, immI src2) %{
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src1$$reg,12,48) |
Assembler::simm20($src2$$constant));
%}
enc_class z_rsyform_reg_reg(iRegI dst, iRegI src, iRegI shft) %{
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src$$reg,12,48) |
Assembler::reg($shft$$reg,16,48) |
@ -1788,7 +1765,7 @@ encode %{
enc_class z_rxform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
assert((($primary>>30) & 0x03) == 1, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::reg($src1$$reg,12,32) |
Assembler::reg($src2$$reg,16,32) |
@ -1797,14 +1774,14 @@ encode %{
enc_class z_rxform_imm_reg(iRegL dst, immL con, iRegL src) %{
assert((($primary>>30) & 0x03) == 1, "Instruction format error");
z_emit32(cbuf, $primary |
z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::reg($src$$reg,16,32) |
Assembler::uimm12($con$$constant,20,32));
%}
enc_class z_rxyform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src1$$reg,12,48) |
Assembler::reg($src2$$reg,16,48) |
@ -1812,7 +1789,7 @@ encode %{
%}
enc_class z_rxyform_imm_reg(iRegL dst, immL con, iRegL src) %{
z_emit48(cbuf, $primary |
z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src$$reg,16,48) |
Assembler::simm20($con$$constant));
@ -1825,14 +1802,14 @@ encode %{
int con = $src$$constant;
assert(VM_Version::has_MemWithImmALUOps(), "unsupported CPU");
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::regz(base,16,48) |
Assembler::simm20(disp) |
Assembler::simm8(con,8,48));
%}
enc_class z_silform(memoryRS mem, immI16 src) %{
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::regz(reg_to_register_object($mem$$base),16,48) |
Assembler::uimm12($mem$$disp,20,48) |
Assembler::simm16($src$$constant,32,48));
@ -1843,13 +1820,13 @@ encode %{
Register Ridx = $mem$$index$$Register;
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if ($primary > (1L << 32)) {
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::uimm12($mem$$disp, 20, 48) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 32) |
Assembler::uimm12($mem$$disp, 20, 32) |
Assembler::reg(Ridx, 12, 32) |
@ -1861,13 +1838,13 @@ encode %{
Register Ridx = $mem$$index$$Register;
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if ($primary > (1L<<32)) {
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::simm20($mem$$disp) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 32) |
Assembler::uimm12($mem$$disp, 20, 32) |
Assembler::reg(Ridx, 12, 32) |
@ -1881,22 +1858,21 @@ encode %{
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if (Displacement::is_shortDisp((long)$mem$$disp)) {
z_emit_inst(cbuf, $secondary |
z_emit_inst(masm, $secondary |
Assembler::reg($dst$$reg, 8, isize) |
Assembler::uimm12($mem$$disp, 20, isize) |
Assembler::reg(Ridx, 12, isize) |
Assembler::regz(reg_to_register_object($mem$$base), 16, isize));
} else if (Displacement::is_validDisp((long)$mem$$disp)) {
z_emit_inst(cbuf, $primary |
z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::simm20($mem$$disp) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
C2_MacroAssembler _masm(&cbuf);
__ load_const_optimized(Z_R1_scratch, $mem$$disp);
if (Ridx != Z_R0) { __ z_agr(Z_R1_scratch, Ridx); }
z_emit_inst(cbuf, $secondary |
z_emit_inst(masm, $secondary |
Assembler::reg($dst$$reg, 8, isize) |
Assembler::uimm12(0, 20, isize) |
Assembler::reg(Z_R1_scratch, 12, isize) |
@ -1905,7 +1881,6 @@ encode %{
%}
enc_class z_enc_brul(Label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -1918,7 +1893,6 @@ encode %{
%}
enc_class z_enc_bru(Label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -1931,7 +1905,6 @@ encode %{
%}
enc_class z_enc_branch_con_far(cmpOp cmp, Label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -1944,7 +1917,6 @@ encode %{
%}
enc_class z_enc_branch_con_short(cmpOp cmp, Label lbl) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -1957,7 +1929,6 @@ encode %{
%}
enc_class z_enc_cmpb_regreg(iRegI src1, iRegI src2, Label lbl, cmpOpT cmp) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -1981,7 +1952,6 @@ encode %{
%}
enc_class z_enc_cmpb_regregFar(iRegI src1, iRegI src2, Label lbl, cmpOpT cmp) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -2007,7 +1977,6 @@ encode %{
%}
enc_class z_enc_cmpb_regimm(iRegI src1, immI8 src2, Label lbl, cmpOpT cmp) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -2032,7 +2001,6 @@ encode %{
%}
enc_class z_enc_cmpb_regimmFar(iRegI src1, immI8 src2, Label lbl, cmpOpT cmp) %{
C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@ -2059,8 +2027,6 @@ encode %{
// Call from Java to runtime.
enc_class z_enc_java_to_runtime_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
// Save return pc before call to the place where we need it, since
// callee doesn't.
unsigned int start_off = __ offset();
@ -2087,36 +2053,37 @@ encode %{
enc_class z_enc_java_static_call(method meth) %{
// Call to fixup routine. Fixup routine uses ScopeDesc info to determine
// whom we intended to call.
C2_MacroAssembler _masm(&cbuf);
int ret_offset = 0;
if (!_method) {
ret_offset = emit_call_reloc(_masm, $meth$$method,
ret_offset = emit_call_reloc(masm, $meth$$method,
relocInfo::runtime_call_w_cp_type, ra_);
} else {
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
if (_optimized_virtual) {
ret_offset = emit_call_reloc(_masm, $meth$$method,
ret_offset = emit_call_reloc(masm, $meth$$method,
opt_virtual_call_Relocation::spec(method_index));
} else {
ret_offset = emit_call_reloc(_masm, $meth$$method,
ret_offset = emit_call_reloc(masm, $meth$$method,
static_call_Relocation::spec(method_index));
}
}
assert(__ inst_mark() != nullptr, "emit_call_reloc must set_inst_mark()");
if (_method) { // Emit stub for static call.
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(masm);
if (stub == nullptr) {
__ clear_inst_mark();
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
__ clear_inst_mark();
%}
// Java dynamic call
enc_class z_enc_java_dynamic_call(method meth) %{
C2_MacroAssembler _masm(&cbuf);
unsigned int start_off = __ offset();
int vtable_index = this->_vtable_index;
@ -2134,11 +2101,12 @@ encode %{
// Call to fixup routine. Fixup routine uses ScopeDesc info
// to determine who we intended to call.
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
unsigned int ret_off = __ offset();
assert(__ offset() - start_off == 6, "bad prelude len: %d", __ offset() - start_off);
ret_off += emit_call_reloc(_masm, $meth$$method, relocInfo::none, ra_);
ret_off += emit_call_reloc(masm, $meth$$method, relocInfo::none, ra_);
__ clear_inst_mark();
assert(_method, "lazy_constant may be wrong when _method==null");
} else {
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
@ -2171,7 +2139,6 @@ encode %{
%}
enc_class z_enc_cmov_reg(cmpOp cmp, iRegI dst, iRegI src) %{
C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rsrc = reg_to_register_object($src$$reg);
@ -2192,7 +2159,6 @@ encode %{
%}
enc_class z_enc_cmov_imm(cmpOp cmp, iRegI dst, immI16 src) %{
C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
int Csrc = $src$$constant;
Assembler::branch_condition cc = (Assembler::branch_condition)$cmp$$cmpcode;
@ -2209,7 +2175,6 @@ encode %{
%}
enc_class z_enc_cctobool(iRegI res) %{
C2_MacroAssembler _masm(&cbuf);
Register Rres = reg_to_register_object($res$$reg);
if (VM_Version::has_LoadStoreConditional()) {
@ -2226,7 +2191,6 @@ encode %{
%}
enc_class z_enc_casI(iRegI compare_value, iRegI exchange_value, iRegP addr_ptr) %{
C2_MacroAssembler _masm(&cbuf);
Register Rcomp = reg_to_register_object($compare_value$$reg);
Register Rnew = reg_to_register_object($exchange_value$$reg);
Register Raddr = reg_to_register_object($addr_ptr$$reg);
@ -2235,7 +2199,6 @@ encode %{
%}
enc_class z_enc_casL(iRegL compare_value, iRegL exchange_value, iRegP addr_ptr) %{
C2_MacroAssembler _masm(&cbuf);
Register Rcomp = reg_to_register_object($compare_value$$reg);
Register Rnew = reg_to_register_object($exchange_value$$reg);
Register Raddr = reg_to_register_object($addr_ptr$$reg);
@ -2244,7 +2207,6 @@ encode %{
%}
enc_class z_enc_SwapI(memoryRSY mem, iRegI dst, iRegI tmp) %{
C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rtmp = reg_to_register_object($tmp$$reg);
guarantee(Rdst != Rtmp, "Fix match rule to use TEMP_DEF");
@ -2260,7 +2222,6 @@ encode %{
%}
enc_class z_enc_SwapL(memoryRSY mem, iRegL dst, iRegL tmp) %{
C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rtmp = reg_to_register_object($tmp$$reg);
guarantee(Rdst != Rtmp, "Fix match rule to use TEMP_DEF");
@ -9558,9 +9519,10 @@ instruct RethrowException() %{
// TODO: s390 port size(VARIABLE_SIZE);
format %{ "Jmp rethrow_stub" %}
ins_encode %{
cbuf.set_insts_mark();
__ set_inst_mark();
__ load_const_optimized(Z_R1_scratch, (address)OptoRuntime::rethrow_stub());
__ z_br(Z_R1_scratch);
__ clear_inst_mark();
%}
ins_pipe(pipe_class_dummy);
%}

View File

@ -4260,6 +4260,7 @@ void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int ve
void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
assert(VM_Version::supports_avx512_vbmi(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@ -4695,6 +4696,7 @@ void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4712,6 +4714,7 @@ void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4729,6 +4732,7 @@ void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4746,6 +4750,7 @@ void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4763,6 +4768,7 @@ void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4787,6 +4793,7 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -4811,6 +4818,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse2(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@ -4828,6 +4836,7 @@ void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8
void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@ -13318,21 +13327,25 @@ void Assembler::decq(Address dst) {
}
void Assembler::fxrstor(Address src) {
InstructionMark im(this);
emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(1), src, 0);
}
void Assembler::xrstor(Address src) {
InstructionMark im(this);
emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(5), src, 0);
}
void Assembler::fxsave(Address dst) {
InstructionMark im(this);
emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(0), dst, 0);
}
void Assembler::xsave(Address dst) {
InstructionMark im(this);
emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(4), dst, 0);
}

View File

@ -114,13 +114,13 @@ int IntelJccErratum::compute_padding(uintptr_t current_offset, const MachNode* m
}
}
#define __ _masm.
#define __ _masm->
uintptr_t IntelJccErratumAlignment::pc() {
return (uintptr_t)__ pc();
}
IntelJccErratumAlignment::IntelJccErratumAlignment(MacroAssembler& masm, int jcc_size) :
IntelJccErratumAlignment::IntelJccErratumAlignment(MacroAssembler* masm, int jcc_size) :
_masm(masm),
_start_pc(pc()) {
if (!VM_Version::has_intel_jcc_erratum()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,13 +54,13 @@ public:
class IntelJccErratumAlignment {
private:
MacroAssembler& _masm;
MacroAssembler* _masm;
uintptr_t _start_pc;
uintptr_t pc();
public:
IntelJccErratumAlignment(MacroAssembler& masm, int jcc_size);
IntelJccErratumAlignment(MacroAssembler* masm, int jcc_size);
~IntelJccErratumAlignment();
};

View File

@ -34,21 +34,17 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
#define __ masm->
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// movq rbx, 0
// jmp -5 # to self
if (mark == nullptr) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
mark = __ inst_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(to_interp_stub_size());
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed.

View File

@ -40,7 +40,7 @@ instruct compareAndSwapP_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@ -61,7 +61,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register

View File

@ -40,7 +40,7 @@ instruct compareAndSwapP_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@ -61,7 +61,7 @@ instruct compareAndSwapN_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@ -80,7 +80,7 @@ instruct compareAndExchangeN_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
@ -101,7 +101,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register

View File

@ -375,7 +375,7 @@ OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::N
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
@ -437,13 +437,15 @@ private:
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}

View File

@ -1,5 +1,5 @@
//
// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -33,34 +33,34 @@ source %{
#include "c2_intelJccErratum_x86.hpp"
static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
{
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, *stub->entry());
}
__ bind(*stub->continuation());
}
static void x_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
static void x_load_barrier_cmpxchg(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
{
IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::zero, good);
}
{
IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */);
IntelJccErratumAlignment intel_alignment(masm, 5 /* jcc_size */);
__ jmp(*stub->entry());
}
__ bind(*stub->continuation());
}
static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
static void x_cmpxchg_common(MacroAssembler* masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
// Compare value (oldval) is in rax
const Address mem = Address(mem_reg, 0);
@ -73,7 +73,7 @@ static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Regist
if (node->barrier_data() != XLoadBarrierElided) {
Label good;
x_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good);
x_load_barrier_cmpxchg(masm, node, mem, rax, tmp, good);
__ movptr(rax, tmp);
__ lock();
__ cmpxchgptr(newval, mem);
@ -96,7 +96,7 @@ instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr)
ins_encode %{
__ movptr($dst$$Register, $mem$$Address);
x_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
x_load_barrier(masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(ialu_reg_mem);
@ -112,7 +112,7 @@ instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP
ins_encode %{
precond($oldval$$Register == rax);
x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
%}
ins_pipe(pipe_cmpxchg);
@ -131,7 +131,7 @@ instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla
ins_encode %{
precond($oldval$$Register == rax);
x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
if (barrier_data() != XLoadBarrierElided) {
__ cmpptr($tmp$$Register, rax);
}
@ -151,7 +151,7 @@ instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
ins_encode %{
__ xchgptr($newval$$Register, Address($mem$$Register, 0));
x_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
x_load_barrier(masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(pipe_cmpxchg);

View File

@ -356,7 +356,7 @@ static void emit_store_fast_path_check_c2(MacroAssembler* masm, Address ref_addr
// This is a JCC erratum mitigation wrapper for calling the inner check
int size = store_fast_path_check_size(masm, ref_addr, is_atomic, medium_path);
// Emit JCC erratum mitigation nops with the right size
IntelJccErratumAlignment intel_alignment(*masm, size);
IntelJccErratumAlignment intel_alignment(masm, size);
// Emit the JCC erratum mitigation guarded code
emit_store_fast_path_check(masm, ref_addr, is_atomic, medium_path);
#endif
@ -1184,7 +1184,7 @@ OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::N
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
@ -1246,13 +1246,15 @@ private:
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
C2_MacroAssembler c2_masm(__ code());
vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}

View File

@ -34,66 +34,66 @@ source %{
#include "c2_intelJccErratum_x86.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_color(MacroAssembler& _masm, const MachNode* node, Register ref) {
static void z_color(MacroAssembler* masm, const MachNode* node, Register ref) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
__ shlq(ref, barrier_Relocation::unpatched);
__ orq_imm32(ref, barrier_Relocation::unpatched);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr);
}
static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
__ shrq(ref, barrier_Relocation::unpatched);
}
static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
static void z_keep_alive_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
__ Assembler::testl(ref, barrier_Relocation::unpatched);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest);
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
__ jcc(Assembler::notEqual, *stub->entry());
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (on_non_strong) {
z_keep_alive_load_barrier(_masm, node, ref_addr, ref);
z_keep_alive_load_barrier(masm, node, ref_addr, ref);
return;
}
z_uncolor(_masm, node, ref);
z_uncolor(masm, node, ref);
if (node->barrier_data() == ZBarrierElided) {
return;
}
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
{
IntelJccErratumAlignment intel_alignment(_masm, 6);
IntelJccErratumAlignment intel_alignment(masm, 6);
__ jcc(Assembler::above, *stub->entry());
}
__ bind(*stub->continuation());
}
static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
if (rnew_zaddress != noreg) {
// noreg means null; no need to color
__ movptr(rnew_zpointer, rnew_zaddress);
z_color(_masm, node, rnew_zpointer);
z_color(masm, node, rnew_zpointer);
}
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
@ -124,7 +124,7 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
ins_encode %{
__ movptr($dst$$Register, $mem$$Address);
z_load_barrier(_masm, this, $mem$$Address, $dst$$Register);
z_load_barrier(masm, this, $mem$$Address, $dst$$Register);
%}
ins_pipe(ialu_reg_mem);
@ -156,7 +156,7 @@ instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
ins_cost(125); // XXX
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
z_store_barrier(_masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
z_store_barrier(masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
__ movq($mem$$Address, $tmp$$Register);
%}
ins_pipe(ialu_mem_reg);
@ -172,7 +172,7 @@ instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
ins_cost(125); // XXX
format %{ "movq $mem, 0\t# ptr" %}
ins_encode %{
z_store_barrier(_masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
z_store_barrier(masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
// Store a colored null - barrier code above does not need to color
__ movq($mem$$Address, barrier_Relocation::unpatched);
// The relocation cant be fully after the mov, as that is the beginning of a random subsequent
@ -194,11 +194,11 @@ instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_R
assert_different_registers($oldval$$Register, $mem$$Register);
assert_different_registers($oldval$$Register, $newval$$Register);
const Address mem_addr = Address($mem$$Register, 0);
z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
z_color(_masm, this, $oldval$$Register);
z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
z_color(masm, this, $oldval$$Register);
__ lock();
__ cmpxchgptr($tmp$$Register, mem_addr);
z_uncolor(_masm, this, $oldval$$Register);
z_uncolor(masm, this, $oldval$$Register);
%}
ins_pipe(pipe_cmpxchg);
@ -218,8 +218,8 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_
ins_encode %{
assert_different_registers($oldval$$Register, $mem$$Register);
const Address mem_addr = Address($mem$$Register, 0);
z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
z_color(_masm, this, $oldval$$Register);
z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
z_color(masm, this, $oldval$$Register);
__ lock();
__ cmpxchgptr($tmp$$Register, mem_addr);
__ setb(Assembler::equal, $res$$Register);
@ -239,10 +239,10 @@ instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
ins_encode %{
assert_different_registers($mem$$Register, $newval$$Register);
const Address mem_addr = Address($mem$$Register, 0);
z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
__ movptr($newval$$Register, $tmp$$Register);
__ xchgptr($newval$$Register, mem_addr);
z_uncolor(_masm, this, $newval$$Register);
z_uncolor(masm, this, $newval$$Register);
%}
ins_pipe(pipe_cmpxchg);

View File

@ -1441,8 +1441,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// Make sure the call is patchable
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1478,8 +1477,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}

View File

@ -1187,8 +1187,8 @@ class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// NativeCall instruction size is the same as NativeJump.
@ -1306,11 +1306,10 @@ int MachNode::compute_padding(int current_offset) const {
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -1324,11 +1323,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@ -2523,14 +2521,13 @@ static inline Assembler::ComparisonPredicateFP booltest_pred_to_comparison_pred_
}
// Helper methods for MachSpillCopyNode::implementation().
static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st) {
assert(ireg == Op_VecS || // 32bit vector
((src_lo & 1) == 0 && (src_lo + 1) == src_hi &&
(dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi),
"no non-adjacent vector moves" );
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
switch (ireg) {
case Op_VecS: // copy whole register
case Op_VecD:
@ -2581,10 +2578,9 @@ static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
}
}
void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (masm) {
if (is_load) {
switch (ireg) {
case Op_VecS:
@ -2742,8 +2738,7 @@ static inline jlong high_bit_set(BasicType bt) {
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
C2_MacroAssembler _masm(&cbuf);
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
__ nop(_count);
}
@ -2757,8 +2752,7 @@ static inline jlong high_bit_set(BasicType bt) {
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
C2_MacroAssembler _masm(&cbuf);
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc* ra_) const {
__ int3();
}
@ -2771,7 +2765,6 @@ static inline jlong high_bit_set(BasicType bt) {
encode %{
enc_class call_epilog %{
C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));

File diff suppressed because it is too large Load Diff

View File

@ -358,7 +358,7 @@ source %{
#define RELOC_IMM64 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
#define __ _masm.
#define __ masm->
RegMask _ANY_REG_mask;
RegMask _PTR_REG_mask;
@ -519,7 +519,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
}
// This could be in MacroAssembler but it's fairly C2 specific
static void emit_cmpfp_fixup(MacroAssembler& _masm) {
static void emit_cmpfp_fixup(MacroAssembler* masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
@ -539,7 +539,7 @@ static void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
static void emit_cmpfp3(MacroAssembler* masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@ -558,7 +558,7 @@ static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
// je #
// |-jz -> a | b # a & b
// | -> a #
static void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst,
XMMRegister a, XMMRegister b,
XMMRegister xmmt, Register rt,
bool min, bool single) {
@ -643,7 +643,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, Phase
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@ -719,9 +719,8 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_size_in_bytes();
int bangsize = C->output()->bang_size_in_bytes();
@ -743,7 +742,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != nullptr);
C->output()->set_frame_complete(cbuf.insts_size());
C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@ -795,10 +794,9 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
void MachEpilogNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);
if (generate_vzeroupper(C)) {
// Clear upper bits of YMM registers when current compiled code uses
@ -825,7 +823,6 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
}
if (do_polling() && C->is_method_compilation()) {
MacroAssembler _masm(&cbuf);
Label dummy_label;
Label* code_stub = &dummy_label;
if (!C->output()->in_scratch_emit_size()) {
@ -881,16 +878,15 @@ static enum RC rc_class(OptoReg::Name reg)
}
// Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);
void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
static void vec_stack_to_stack_helper(C2_MacroAssembler *masm, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
switch (ireg) {
case Op_VecS:
__ movq(Address(rsp, -8), rax);
@ -966,11 +962,11 @@ static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
}
}
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
uint MachSpillCopyNode::implementation(C2_MacroAssembler* masm,
PhaseRegAlloc* ra_,
bool do_size,
outputStream* st) const {
assert(cbuf != nullptr || st != nullptr, "sanity");
assert(masm != nullptr || st != nullptr, "sanity");
// Get registers to move
OptoReg::Name src_second = ra_->get_reg_second(in(1));
OptoReg::Name src_first = ra_->get_reg_first(in(1));
@ -997,15 +993,15 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// mem -> mem
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
vec_stack_to_stack_helper(masm, src_offset, dst_offset, ireg, st);
} else if (src_first_rc == rc_float && dst_first_rc == rc_float ) {
vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
vec_mov_helper(masm, src_first, dst_first, src_second, dst_second, ireg, st);
} else if (src_first_rc == rc_float && dst_first_rc == rc_stack ) {
int stack_offset = ra_->reg2offset(dst_first);
vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
vec_spill_helper(masm, false, stack_offset, src_first, ireg, st);
} else if (src_first_rc == rc_stack && dst_first_rc == rc_float ) {
int stack_offset = ra_->reg2offset(src_first);
vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
vec_spill_helper(masm, true, stack_offset, dst_first, ireg, st);
} else {
ShouldNotReachHere();
}
@ -1021,8 +1017,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 64-bit
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ pushq(Address(rsp, src_offset));
__ popq (Address(rsp, dst_offset));
#ifndef PRODUCT
@ -1039,8 +1034,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// No pushl/popl, so:
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movq(Address(rsp, -8), rax);
__ movl(rax, Address(rsp, src_offset));
__ movl(Address(rsp, dst_offset), rax);
@ -1062,8 +1056,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movq(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@ -1077,8 +1070,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movl(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@ -1095,8 +1087,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@ -1111,8 +1102,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@ -1129,8 +1119,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@ -1150,8 +1139,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movq(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1165,8 +1153,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movl(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1182,8 +1169,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movq(as_Register(Matcher::_regEncode[dst_first]),
as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
@ -1198,8 +1184,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movl(as_Register(Matcher::_regEncode[dst_first]),
as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
@ -1216,8 +1201,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdq( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1230,8 +1214,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1246,8 +1229,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1268,8 +1250,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdbl( Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1283,8 +1264,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1300,8 +1280,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdq( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1314,8 +1293,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdl( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1331,8 +1309,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1346,8 +1323,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1370,8 +1346,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1386,8 +1361,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ kmov(as_Register(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1403,8 +1377,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@ -1432,8 +1405,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, nullptr);
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@ -1451,13 +1424,12 @@ void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
void BoxLockNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
MacroAssembler masm(&cbuf);
masm.lea(as_Register(reg), Address(rsp, offset));
__ lea(as_Register(reg), Address(rsp, offset));
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const
@ -1481,10 +1453,9 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
masm.ic_check(InteriorEntryAlignment);
__ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@ -1663,7 +1634,6 @@ encode %{
// [REX_B]
// f: f7 f9 idiv $div
// 0000000000000011 <done>:
MacroAssembler _masm(&cbuf);
Label normal;
Label done;
@ -1719,7 +1689,6 @@ encode %{
// 17: 48 99 cqto
// 19: 48 f7 f9 idiv $div
// 000000000000001c <done>:
MacroAssembler _masm(&cbuf);
Label normal;
Label done;
@ -1761,7 +1730,6 @@ encode %{
Label miss;
const bool set_cond_codes = true;
MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
nullptr, &miss,
/*set_cond_codes:*/ true);
@ -1772,21 +1740,19 @@ encode %{
%}
enc_class clear_avx %{
debug_only(int off0 = cbuf.insts_size());
debug_only(int off0 = __ offset());
if (generate_vzeroupper(Compile::current())) {
// Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
debug_only(int off1 = cbuf.insts_size());
debug_only(int off1 = __ offset());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
MacroAssembler _masm(&cbuf);
__ mov64(r10, (int64_t) $meth$$method);
__ call(r10);
__ post_call_nop();
@ -1797,8 +1763,6 @@ encode %{
// JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to
// determine who we intended to call.
MacroAssembler _masm(&cbuf);
if (!_method) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, $meth$$method)));
} else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
@ -1807,7 +1771,7 @@ encode %{
__ addr_nop_5();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
int method_index = resolved_method_index(cbuf);
int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
address mark = __ pc();
@ -1816,10 +1780,11 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
cbuf.shared_stub_to_interp_for(_method, call_offset);
__ code()->shared_stub_to_interp_for(_method, call_offset);
} else {
// Emit stubs for static call.
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
address stub = CompiledDirectCall::emit_to_interp_stub(masm, mark);
__ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -1830,8 +1795,7 @@ encode %{
%}
enc_class Java_Dynamic_Call(method meth) %{
MacroAssembler _masm(&cbuf);
__ ic_call((address)$meth$$method, resolved_method_index(cbuf));
__ ic_call((address)$meth$$method, resolved_method_index(masm));
__ post_call_nop();
%}
@ -4351,7 +4315,7 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
format %{ "$dst = max($a, $b)\t# intrinsic (float)" %}
ins_encode %{
emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
false /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
@ -4376,7 +4340,7 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe
format %{ "$dst = max($a, $b)\t# intrinsic (double)" %}
ins_encode %{
emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
false /*min*/, false /*single*/);
%}
ins_pipe( pipe_slow );
@ -4401,7 +4365,7 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
format %{ "$dst = min($a, $b)\t# intrinsic (float)" %}
ins_encode %{
emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
true /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
@ -4426,7 +4390,7 @@ instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe
format %{ "$dst = min($a, $b)\t# intrinsic (double)" %}
ins_encode %{
emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
true /*min*/, false /*single*/);
%}
ins_pipe( pipe_slow );
@ -9732,7 +9696,7 @@ instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
"exit:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
emit_cmpfp_fixup(_masm);
emit_cmpfp_fixup(masm);
%}
ins_pipe(pipe_slow);
%}
@ -9783,7 +9747,7 @@ instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
"exit:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
emit_cmpfp_fixup(_masm);
emit_cmpfp_fixup(masm);
%}
ins_pipe(pipe_slow);
%}
@ -9836,7 +9800,7 @@ instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -9857,7 +9821,7 @@ instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$Address);
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -9877,7 +9841,7 @@ instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomiss($src$$XMMRegister, $constantaddress($con));
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -9898,7 +9862,7 @@ instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -9919,7 +9883,7 @@ instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$Address);
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@ -9939,7 +9903,7 @@ instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomisd($src$$XMMRegister, $constantaddress($con));
emit_cmpfp3(_masm, $dst$$Register);
emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP
#define CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP
// C2_MacroAssembler contains high-level macros for C2
#endif // CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP

View File

@ -42,7 +42,7 @@
// ----------------------------------------------------------------------------
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
ShouldNotReachHere(); // Only needed for COMPILER2.
return nullptr;
}

View File

@ -2896,14 +2896,6 @@ void ADLParser::ins_encode_parse_block(InstructForm& inst) {
encoding->add_parameter(opForm->_ident, param);
}
if (!inst._is_postalloc_expand) {
// Define a MacroAssembler instance for use by the encoding. The
// name is chosen to match the __ idiom used for assembly in other
// parts of hotspot and assumes the existence of the standard
// #define __ _masm.
encoding->add_code(" C2_MacroAssembler _masm(&cbuf);\n");
}
// Parse the following %{ }% block
ins_encode_parse_block_impl(inst, encoding, ec_name);

View File

@ -1902,7 +1902,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// target specific instruction object encodings.
// Define the ___Node::emit() routine
//
// (1) void ___Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// (1) void ___Node::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// (2) // ... encoding defined by user
// (3)
// (4) }
@ -2301,7 +2301,7 @@ public:
// Check results of prior scan
if ( ! _may_reloc ) {
// Definitely don't need relocation information
fprintf( _fp, "emit_%s(cbuf, ", d32_hi_lo );
fprintf( _fp, "emit_%s(masm, ", d32_hi_lo );
emit_replacement(); fprintf(_fp, ")");
}
else {
@ -2315,26 +2315,26 @@ public:
fprintf(_fp,"if ( opnd_array(%d)->%s_reloc() != relocInfo::none ) {\n",
_operand_idx, disp_constant);
fprintf(_fp," ");
fprintf(_fp,"emit_%s_reloc(cbuf, ", d32_hi_lo );
fprintf(_fp,"emit_%s_reloc(masm, ", d32_hi_lo );
emit_replacement(); fprintf(_fp,", ");
fprintf(_fp,"opnd_array(%d)->%s_reloc(), ",
_operand_idx, disp_constant);
fprintf(_fp, "%d", _reloc_form);fprintf(_fp, ");");
fprintf(_fp,"\n");
fprintf(_fp,"} else {\n");
fprintf(_fp," emit_%s(cbuf, ", d32_hi_lo);
fprintf(_fp," emit_%s(masm, ", d32_hi_lo);
emit_replacement(); fprintf(_fp, ");\n"); fprintf(_fp,"}");
}
}
else if ( _doing_emit_d16 ) {
// Relocation of 16-bit values is not supported
fprintf(_fp,"emit_d16(cbuf, ");
fprintf(_fp,"emit_d16(masm, ");
emit_replacement(); fprintf(_fp, ")");
// No relocation done for 16-bit values
}
else if ( _doing_emit8 ) {
// Relocation of 8-bit values is not supported
fprintf(_fp,"emit_d8(cbuf, ");
fprintf(_fp,"emit_d8(masm, ");
emit_replacement(); fprintf(_fp, ")");
// No relocation done for 8-bit values
}
@ -2675,7 +2675,7 @@ void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) {
// (1)
// Output instruction's emit prototype
fprintf(fp, "void %sNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {\n", inst._ident);
fprintf(fp, "void %sNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {\n", inst._ident);
// If user did not define an encode section,
// provide stub that does not generate any machine code.
@ -2685,12 +2685,9 @@ void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) {
return;
}
// Save current instruction's starting address (helps with relocation).
fprintf(fp, " cbuf.set_insts_mark();\n");
// For MachConstantNodes which are ideal jump nodes, fill the jump table.
if (inst.is_mach_constant() && inst.is_ideal_jump()) {
fprintf(fp, " ra_->C->output()->constant_table().fill_jump_table(cbuf, (MachConstantNode*) this, _index2label);\n");
fprintf(fp, " ra_->C->output()->constant_table().fill_jump_table(masm, (MachConstantNode*) this, _index2label);\n");
}
// Output each operand's offset into the array of registers.

View File

@ -1629,7 +1629,7 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," virtual bool requires_postalloc_expand() const { return true; }\n");
fprintf(fp," virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);\n");
} else {
fprintf(fp," virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;\n");
fprintf(fp," virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;\n");
}
}

View File

@ -227,7 +227,8 @@ class AbstractAssembler : public ResourceObj {
bool isByte(int x) const { return 0 <= x && x < 0x100; }
bool isShiftCount(int x) const { return 0 <= x && x < 32; }
// Instruction boundaries (required when emitting relocatable values).
// Mark instruction boundaries, this is required when emitting relocatable values.
// Basically, all instructions that directly or indirectly use Assembler::emit_data* methods.
class InstructionMark: public StackObj {
private:
AbstractAssembler* _assm;
@ -366,6 +367,7 @@ class AbstractAssembler : public ResourceObj {
CodeBuffer* code() const { return code_section()->outer(); }
int sect() const { return code_section()->index(); }
address pc() const { return code_section()->end(); }
address begin() const { return code_section()->start(); }
int offset() const { return code_section()->size(); }
int locator() const { return CodeBuffer::locator(offset(), sect()); }
@ -374,10 +376,11 @@ class AbstractAssembler : public ResourceObj {
void register_skipped(int size) { code_section()->register_skipped(size); }
address inst_mark() const { return code_section()->mark(); }
void set_inst_mark() { code_section()->set_mark(); }
void clear_inst_mark() { code_section()->clear_mark(); }
address inst_mark() const { return code_section()->mark(); }
void set_inst_mark() { code_section()->set_mark(); }
void set_inst_mark(address addr) { code_section()->set_mark(addr); }
void clear_inst_mark() { code_section()->clear_mark(); }
void set_inst_end(address addr) { code_section()->set_end(addr); }
// Constants in code
void relocate(RelocationHolder const& rspec, int format = 0) {
@ -389,6 +392,12 @@ class AbstractAssembler : public ResourceObj {
void relocate( relocInfo::relocType rtype, int format = 0) {
code_section()->relocate(code_section()->end(), rtype, format);
}
void relocate(address addr, relocInfo::relocType rtype, int format = 0) {
code_section()->relocate(addr, rtype, format);
}
void relocate(address addr, RelocationHolder const& rspec, int format = 0) {
code_section()->relocate(addr, rspec, format);
}
static int code_fill_byte(); // used to pad out odd-sized code buffers

View File

@ -28,6 +28,7 @@
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "opto/c2_MacroAssembler.hpp"
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
@ -185,7 +186,7 @@ private:
public:
// Returns null if CodeBuffer::expand fails
static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
static address emit_to_interp_stub(MacroAssembler *masm, address mark = nullptr);
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();

View File

@ -1243,7 +1243,8 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, u1 tag, jint pc_offset, HotSpo
CodeInstaller::pd_relocate_JavaMethod(buffer, method, pc_offset, JVMCI_CHECK);
if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
// Need a static call stub for transitions from compiled to interpreted.
if (CompiledDirectCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset) == nullptr) {
MacroAssembler masm(&buffer);
if (CompiledDirectCall::emit_to_interp_stub(&masm, _instructions->start() + pc_offset) == nullptr) {
JVMCI_ERROR("could not emit to_interp stub - code cache is full");
}
}

View File

@ -33,22 +33,21 @@
C2CodeStubList::C2CodeStubList() :
_stubs(Compile::current()->comp_arena(), 2, 0, nullptr) {}
void C2CodeStubList::emit(CodeBuffer& cb) {
C2_MacroAssembler masm(&cb);
void C2CodeStubList::emit(C2_MacroAssembler& masm) {
for (int i = _stubs.length() - 1; i >= 0; i--) {
C2CodeStub* stub = _stubs.at(i);
int max_size = stub->max_size();
// Make sure there is enough space in the code buffer
if (cb.insts()->maybe_expand_to_ensure_remaining(max_size) && cb.blob() == nullptr) {
if (masm.code()->insts()->maybe_expand_to_ensure_remaining(max_size) && masm.code()->blob() == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
DEBUG_ONLY(int size_before = cb.insts_size();)
DEBUG_ONLY(int size_before = masm.offset();)
stub->emit(masm);
DEBUG_ONLY(int actual_size = cb.insts_size() - size_before;)
DEBUG_ONLY(int actual_size = masm.offset() - size_before;)
assert(max_size >= actual_size, "Expected stub size (%d) must be larger than or equal to actual stub size (%d)", max_size, actual_size);
}
}

View File

@ -68,7 +68,7 @@ public:
C2CodeStubList();
void add_stub(C2CodeStub* stub) { _stubs.append(stub); }
void emit(CodeBuffer& cb);
void emit(C2_MacroAssembler& masm);
};
class C2SafepointPollStub : public C2CodeStub {

View File

@ -687,7 +687,7 @@ public:
virtual const Type* Value(PhaseGVN* phase) const;
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int required_outcnt() const { return 2; }
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const { }
virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
virtual void format( PhaseRegAlloc *, outputStream *st ) const;

View File

@ -144,28 +144,27 @@ void ConstantTable::calculate_offsets_and_size() {
_size = align_up(offset, (int)CodeEntryAlignment);
}
bool ConstantTable::emit(CodeBuffer& cb) const {
MacroAssembler _masm(&cb);
bool ConstantTable::emit(C2_MacroAssembler* masm) const {
for (int i = 0; i < _constants.length(); i++) {
Constant con = _constants.at(i);
address constant_addr = nullptr;
if (con.is_array()) {
constant_addr = _masm.array_constant(con.type(), con.get_array(), con.alignment());
constant_addr = masm->array_constant(con.type(), con.get_array(), con.alignment());
} else {
switch (con.type()) {
case T_INT: constant_addr = _masm.int_constant( con.get_jint() ); break;
case T_LONG: constant_addr = _masm.long_constant( con.get_jlong() ); break;
case T_FLOAT: constant_addr = _masm.float_constant( con.get_jfloat() ); break;
case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
case T_INT: constant_addr = masm->int_constant( con.get_jint() ); break;
case T_LONG: constant_addr = masm->long_constant( con.get_jlong() ); break;
case T_FLOAT: constant_addr = masm->float_constant( con.get_jfloat() ); break;
case T_DOUBLE: constant_addr = masm->double_constant(con.get_jdouble()); break;
case T_OBJECT: {
jobject obj = con.get_jobject();
int oop_index = _masm.oop_recorder()->find_index(obj);
constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
int oop_index = masm->oop_recorder()->find_index(obj);
constant_addr = masm->address_constant((address) obj, oop_Relocation::spec(oop_index));
break;
}
case T_ADDRESS: {
address addr = (address) con.get_jobject();
constant_addr = _masm.address_constant(addr);
constant_addr = masm->address_constant(addr);
break;
}
// We use T_VOID as marker for jump-table entries (labels) which
@ -175,23 +174,23 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
// Fill the jump-table with a dummy word. The real value is
// filled in later in fill_jump_table.
address dummy = (address) n;
constant_addr = _masm.address_constant(dummy);
constant_addr = masm->address_constant(dummy);
if (constant_addr == nullptr) {
return false;
}
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
"must be: %d == %d", (int)(constant_addr - _masm.code()->consts()->start()), (int)(con.offset()));
assert((constant_addr - masm->code()->consts()->start()) == con.offset(),
"must be: %d == %d", (int)(constant_addr - masm->code()->consts()->start()), (int)(con.offset()));
// Expand jump-table
address last_addr = nullptr;
for (uint j = 1; j < n->outcnt(); j++) {
last_addr = _masm.address_constant(dummy + j);
last_addr = masm->address_constant(dummy + j);
if (last_addr == nullptr) {
return false;
}
}
#ifdef ASSERT
address start = _masm.code()->consts()->start();
address start = masm->code()->consts()->start();
address new_constant_addr = last_addr - ((n->outcnt() - 1) * sizeof(address));
// Expanding the jump-table could result in an expansion of the const code section.
// In that case, we need to check if the new constant address matches the offset.
@ -203,8 +202,8 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
}
case T_METADATA: {
Metadata* obj = con.get_metadata();
int metadata_index = _masm.oop_recorder()->find_index(obj);
constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index));
int metadata_index = masm->oop_recorder()->find_index(obj);
constant_addr = masm->address_constant((address) obj, metadata_Relocation::spec(metadata_index));
break;
}
default: ShouldNotReachHere();
@ -214,8 +213,8 @@ bool ConstantTable::emit(CodeBuffer& cb) const {
if (constant_addr == nullptr) {
return false;
}
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
"must be: %d == %d", (int)(constant_addr - _masm.code()->consts()->start()), (int)(con.offset()));
assert((constant_addr - masm->code()->consts()->start()) == con.offset(),
"must be: %d == %d", (int)(constant_addr - masm->code()->consts()->start()), (int)(con.offset()));
}
return true;
}
@ -292,7 +291,7 @@ ConstantTable::Constant ConstantTable::add_jump_table(MachConstantNode* n) {
return con;
}
void ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
void ConstantTable::fill_jump_table(C2_MacroAssembler* masm, MachConstantNode* n, GrowableArray<Label*> labels) const {
// If called from Compile::scratch_emit_size do nothing.
if (Compile::current()->output()->in_scratch_emit_size()) return;
@ -304,13 +303,12 @@ void ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, Growabl
// to get the plain offset into the constant table.
int offset = n->constant_offset() - table_base_offset();
MacroAssembler _masm(&cb);
address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);
address* jump_table_base = (address*) (masm->code()->consts()->start() + offset);
for (uint i = 0; i < n->outcnt(); i++) {
address* constant_addr = &jump_table_base[i];
assert(*constant_addr == (((address) n) + i), "all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, p2i(*constant_addr), p2i(((address) n) + i));
*constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
*constant_addr = masm->code()->consts()->target(*labels.at(i), (address) constant_addr);
masm->code()->consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,10 +27,10 @@
#include "utilities/globalDefinitions.hpp"
class CodeBuffer;
class Metadata;
class MachConstantNode;
class MachOper;
class C2_MacroAssembler;
class ConstantTable {
public:
@ -139,7 +139,7 @@ public:
void set_table_base_offset(int x) { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
int table_base_offset() const { assert(_table_base_offset != -1, "not set yet"); return _table_base_offset; }
bool emit(CodeBuffer& cb) const;
bool emit(C2_MacroAssembler* masm) const;
// Returns the offset of the last entry (the top) of the constant table.
int top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }
@ -172,7 +172,7 @@ public:
// Jump-table
Constant add_jump_table(MachConstantNode* n);
void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
void fill_jump_table(C2_MacroAssembler* masm, MachConstantNode* n, GrowableArray<Label*> labels) const;
};

View File

@ -68,7 +68,7 @@ private:
public:
BoxLockNode( int lock );
virtual int Opcode() const;
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
virtual const RegMask &in_RegMask(uint) const;
virtual const RegMask &out_RegMask() const;

View File

@ -132,7 +132,7 @@ bool methodOper::cmp( const MachOper &oper ) const {
//------------------------------MachNode---------------------------------------
//------------------------------emit-------------------------------------------
void MachNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
#ifdef ASSERT
tty->print("missing MachNode emit function: ");
dump();
@ -604,7 +604,7 @@ void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
void MachNullCheckNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
void MachNullCheckNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// only emits entries in the null-pointer exception handler table
}
void MachNullCheckNode::label_set(Label* label, uint block_num) {

View File

@ -25,6 +25,7 @@
#ifndef SHARE_OPTO_MACHNODE_HPP
#define SHARE_OPTO_MACHNODE_HPP
#include "opto/c2_MacroAssembler.hpp"
#include "opto/callnode.hpp"
#include "opto/constantTable.hpp"
#include "opto/matcher.hpp"
@ -34,7 +35,6 @@
#include "utilities/growableArray.hpp"
class BufferBlob;
class CodeBuffer;
class JVMState;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
@ -284,8 +284,8 @@ public:
MachOper **_opnds;
uint16_t num_opnds() const { return _num_opnds; }
// Emit bytes into cbuf
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
// Emit bytes using C2_MacroAssembler
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
// Expand node after register allocation.
// Node is replaced by several nodes in the postalloc expand phase.
// Corresponding methods are generated for nodes if they specify
@ -421,7 +421,7 @@ public:
class MachBreakpointNode : public MachIdealNode {
public:
MachBreakpointNode( ) {}
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
#ifndef PRODUCT
@ -447,7 +447,7 @@ public:
virtual bool requires_postalloc_expand() const;
virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);
virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const;
virtual void emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const;
virtual uint size(PhaseRegAlloc* ra_) const;
static const RegMask& static_out_RegMask() { return _out_RegMask; }
@ -498,7 +498,7 @@ public:
class MachUEPNode : public MachIdealNode {
public:
MachUEPNode( ) {}
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
#ifndef PRODUCT
@ -512,7 +512,7 @@ public:
class MachPrologNode : public MachIdealNode {
public:
MachPrologNode( ) {}
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
virtual int reloc() const;
@ -527,7 +527,7 @@ public:
class MachEpilogNode : public MachIdealNode {
public:
MachEpilogNode(bool do_poll = false) : _do_polling(do_poll) {}
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
virtual int reloc() const;
virtual const Pipeline *pipeline() const;
@ -552,7 +552,7 @@ private:
public:
MachNopNode( ) : _count(1) {}
MachNopNode( int count ) : _count(count) {}
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
virtual const class Type *bottom_type() const { return Type::CONTROL; }
@ -610,9 +610,9 @@ public:
virtual const class Type *bottom_type() const { return _type; }
virtual uint ideal_reg() const { return _type->ideal_reg(); }
virtual uint oper_input_base() const { return 1; }
uint implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const;
uint implementation( C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const;
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual uint size(PhaseRegAlloc *ra_) const;
@ -675,7 +675,7 @@ public:
virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
virtual uint oper_input_base() const { return 1; }
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const { }
virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
virtual const char *Name() const { return "MachMerge"; }
@ -715,7 +715,7 @@ public:
virtual int Opcode() const;
virtual uint size_of() const { return sizeof(*this); }
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
virtual void label_set(Label* label, uint block_num);
virtual void save_label(Label** label, uint* block_num);
virtual void negate() { }
@ -946,13 +946,13 @@ public:
virtual const RegMask &in_RegMask(uint) const;
int resolved_method_index(CodeBuffer &cbuf) const {
int resolved_method_index(C2_MacroAssembler *masm) const {
if (_override_symbolic_info) {
// Attach corresponding Method* to the call site, so VM can use it during resolution
// instead of querying symbolic info from bytecode.
assert(_method != nullptr, "method should be set");
assert(_method->constant_encoding()->is_method(), "should point to a Method");
return cbuf.oop_recorder()->find_index(_method->constant_encoding());
return masm->code()->oop_recorder()->find_index(_method->constant_encoding());
}
return 0; // Use symbolic info from bytecode (resolved_method is null).
}
@ -1057,7 +1057,7 @@ private:
public:
virtual const RegMask &out_RegMask() const { return *_opnds[0]->in_RegMask(0); }
virtual uint rule() const { return 9999999; }
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {}
MachTempNode(MachOper* oper) {
init_class_id(Class_MachTemp);

View File

@ -1092,8 +1092,8 @@ juint Node::max_flags() {
// Print as assembly
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
// Emit bytes using C2_MacroAssembler
void Node::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

View File

@ -191,6 +191,7 @@ class ShiftVNode;
class ExpandVNode;
class CompressVNode;
class CompressMNode;
class C2_MacroAssembler;
#ifndef OPTO_DU_ITERATOR_ASSERT
@ -1181,9 +1182,8 @@ public:
// Print as assembly
virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
// Emit bytes starting at parameter 'ptr'
// Bump 'ptr' by the number of output bytes
virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
// Emit bytes using C2_MacroAssembler
virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
// Size of instruction in bytes
virtual uint size(PhaseRegAlloc *ra_) const;

View File

@ -363,7 +363,8 @@ void PhaseOutput::Output() {
return;
}
fill_buffer(cb, blk_starts);
C2_MacroAssembler masm(cb);
fill_buffer(&masm, blk_starts);
}
bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
@ -1368,7 +1369,7 @@ CodeBuffer* PhaseOutput::init_buffer() {
}
//------------------------------fill_buffer------------------------------------
void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
void PhaseOutput::fill_buffer(C2_MacroAssembler* masm, uint* blk_starts) {
// blk_starts[] contains offsets calculated during short branches processing,
// offsets should not be increased during following steps.
@ -1424,7 +1425,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Emit the constant table.
if (C->has_mach_constant_base_node()) {
if (!constant_table().emit(*cb)) {
if (!constant_table().emit(masm)) {
C->record_failure("consts section overflow");
return;
}
@ -1447,14 +1448,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// than by falling-thru from the previous block), then force the
// start of a new bundle.
if (Pipeline::requires_bundling() && starts_bundle(head)) {
cb->flush_bundle(true);
masm->code()->flush_bundle(true);
}
#ifdef ASSERT
if (!block->is_connector()) {
stringStream st;
block->dump_head(C->cfg(), &st);
MacroAssembler(cb).block_comment(st.freeze());
masm->block_comment(st.freeze());
}
jmp_target[i] = 0;
jmp_offset[i] = 0;
@ -1464,7 +1465,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int blk_offset = current_offset;
// Define the label at the beginning of the basic block
MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
masm->bind(blk_labels[block->_pre_order]);
uint last_inst = block->number_of_nodes();
@ -1488,7 +1489,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// If this starts a new instruction group, then flush the current one
// (but allow split bundles)
if (Pipeline::requires_bundling() && starts_bundle(n))
cb->flush_bundle(false);
masm->code()->flush_bundle(false);
// Special handling for SafePoint/Call Nodes
bool is_mcall = false;
@ -1499,8 +1500,8 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// If this requires all previous instructions be flushed, then do so
if (is_sfn || is_mcall || mach->alignment_required() != 1) {
cb->flush_bundle(true);
current_offset = cb->insts_size();
masm->code()->flush_bundle(true);
current_offset = masm->offset();
}
// A padding may be needed again since a previous instruction
@ -1527,14 +1528,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
last_inst++;
C->cfg()->map_node_to_block(nop, block);
// Ensure enough space.
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
nop->emit(*cb, C->regalloc());
cb->flush_bundle(true);
current_offset = cb->insts_size();
nop->emit(masm, C->regalloc());
masm->code()->flush_bundle(true);
current_offset = masm->offset();
}
bool observe_safepoint = is_sfn;
@ -1612,9 +1613,9 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
block->insert_node(nop, j++);
C->cfg()->map_node_to_block(nop, block);
last_inst++;
nop->emit(*cb, C->regalloc());
cb->flush_bundle(true);
current_offset = cb->insts_size();
nop->emit(masm, C->regalloc());
masm->code()->flush_bundle(true);
current_offset = masm->offset();
}
#ifdef ASSERT
jmp_target[i] = block_num;
@ -1679,8 +1680,8 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
// Verify that there is sufficient space remaining
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
@ -1688,15 +1689,15 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
node_offsets[n->_idx] = cb->insts_size();
node_offsets[n->_idx] = masm->offset();
}
#endif
assert(!C->failing(), "Should not reach here if failing.");
// "Normal" instruction case
DEBUG_ONLY(uint instr_offset = cb->insts_size());
n->emit(*cb, C->regalloc());
current_offset = cb->insts_size();
DEBUG_ONLY(uint instr_offset = masm->offset());
n->emit(masm, C->regalloc());
current_offset = masm->offset();
// Above we only verified that there is enough space in the instruction section.
// However, the instruction may emit stubs that cause code buffer expansion.
@ -1715,7 +1716,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
n->dump();
mach->dump_format(C->regalloc(), tty);
tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
tty->print_cr(" ------------------- ");
BufferBlob* blob = this->scratch_buffer_blob();
address blob_begin = blob->content_begin();
@ -1746,12 +1747,12 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
guarantee(delay_slot != nullptr, "expecting delay slot node");
// Back up 1 instruction
cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
masm->code()->set_insts_end(masm->code()->insts_end() - Pipeline::instr_unit_size());
// Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
if ((node_offsets != nullptr) && (delay_slot->_idx < node_offset_limit)) {
node_offsets[delay_slot->_idx] = cb->insts_size();
node_offsets[delay_slot->_idx] = masm->offset();
}
#endif
@ -1773,7 +1774,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
// Insert the delay slot instruction
delay_slot->emit(*cb, C->regalloc());
delay_slot->emit(masm, C->regalloc());
// Don't reuse it
delay_slot = nullptr;
@ -1790,8 +1791,8 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
MachNode *nop = new MachNopNode(padding / nop_size);
block->insert_node(nop, block->number_of_nodes());
C->cfg()->map_node_to_block(nop, block);
nop->emit(*cb, C->regalloc());
current_offset = cb->insts_size();
nop->emit(masm, C->regalloc());
current_offset = masm->offset();
}
}
// Verify that the distance for generated before forward
@ -1809,7 +1810,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (C->failing()) return;
// Define a pseudo-label at the end of the code
MacroAssembler(cb).bind( blk_labels[nblocks] );
masm->bind( blk_labels[nblocks] );
// Compute the size of the first block
_first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
@ -1827,22 +1828,23 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
#endif
if (!cb->finalize_stubs()) {
if (!masm->code()->finalize_stubs()) {
C->record_failure("CodeCache is full");
return;
}
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
bs->emit_stubs(*cb);
bs->emit_stubs(*masm->code());
if (C->failing()) return;
// Fill in stubs.
_stub_list.emit(*cb);
assert(masm->inst_mark() == nullptr, "should be.");
_stub_list.emit(*masm);
if (C->failing()) return;
#ifndef PRODUCT
// Information on the size of the method, without the extraneous code
Scheduling::increment_method_size(cb->insts_size());
Scheduling::increment_method_size(masm->offset());
#endif
// ------------------
@ -1853,23 +1855,23 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// class HandlerImpl is platform-specific and defined in the *.ad files.
if (C->method()) {
// Emit the exception handler code.
_code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
_code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(masm));
if (C->failing()) {
return; // CodeBuffer::expand failed
}
// Emit the deopt handler code.
_code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
_code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(masm));
// Emit the MethodHandle deopt handler code (if required).
if (C->has_method_handle_invokes() && !C->failing()) {
// We can use the same code as for the normal deopt handler, we
// just need a different entry point address.
_code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
_code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(masm));
}
}
// One last check for failed CodeBuffer::expand:
if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full");
return;
}
@ -3357,13 +3359,13 @@ uint PhaseOutput::scratch_emit_size(const Node* n) {
Label* saveL = nullptr;
uint save_bnum = 0;
bool is_branch = n->is_MachBranch();
C2_MacroAssembler masm(&buf);
masm.bind(fakeL);
if (is_branch) {
MacroAssembler masm(&buf);
masm.bind(fakeL);
n->as_MachBranch()->save_label(&saveL, &save_bnum);
n->as_MachBranch()->label_set(&fakeL, 0);
}
n->emit(buf, C->regalloc());
n->emit(&masm, C->regalloc());
// Emitting into the scratch buffer should not fail
assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());

View File

@ -154,7 +154,7 @@ public:
CodeBuffer* init_buffer();
// Write out basic block data to code buffer
void fill_buffer(CodeBuffer* cb, uint* blk_starts);
void fill_buffer(C2_MacroAssembler* masm, uint* blk_starts);
// Compute the information for the exception tables
void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);