8292153: x86: Represent Registers as values

Reviewed-by: kvn, aph
Vladimir Ivanov 2022-08-11 21:16:02 +00:00
parent dedc05cb40
commit 755ecf6b73
28 changed files with 482 additions and 433 deletions


@ -63,33 +63,33 @@ class Argument {
#ifdef _WIN64
REGISTER_DECLARATION(Register, c_rarg0, rcx);
REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);
constexpr Register c_rarg0 = rcx;
constexpr Register c_rarg1 = rdx;
constexpr Register c_rarg2 = r8;
constexpr Register c_rarg3 = r9;
REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
constexpr XMMRegister c_farg0 = xmm0;
constexpr XMMRegister c_farg1 = xmm1;
constexpr XMMRegister c_farg2 = xmm2;
constexpr XMMRegister c_farg3 = xmm3;
#else
REGISTER_DECLARATION(Register, c_rarg0, rdi);
REGISTER_DECLARATION(Register, c_rarg1, rsi);
REGISTER_DECLARATION(Register, c_rarg2, rdx);
REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);
constexpr Register c_rarg0 = rdi;
constexpr Register c_rarg1 = rsi;
constexpr Register c_rarg2 = rdx;
constexpr Register c_rarg3 = rcx;
constexpr Register c_rarg4 = r8;
constexpr Register c_rarg5 = r9;
REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
constexpr XMMRegister c_farg0 = xmm0;
constexpr XMMRegister c_farg1 = xmm1;
constexpr XMMRegister c_farg2 = xmm2;
constexpr XMMRegister c_farg3 = xmm3;
constexpr XMMRegister c_farg4 = xmm4;
constexpr XMMRegister c_farg5 = xmm5;
constexpr XMMRegister c_farg6 = xmm6;
constexpr XMMRegister c_farg7 = xmm7;
#endif // _WIN64
@ -109,33 +109,33 @@ REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 |
// |-------------------------------------------------------|
REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
constexpr Register j_rarg0 = c_rarg1;
constexpr Register j_rarg1 = c_rarg2;
constexpr Register j_rarg2 = c_rarg3;
// Windows runs out of register args here
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
constexpr Register j_rarg3 = rdi;
constexpr Register j_rarg4 = rsi;
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
constexpr Register j_rarg3 = c_rarg4;
constexpr Register j_rarg4 = c_rarg5;
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
constexpr Register j_rarg5 = c_rarg0;
REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
constexpr XMMRegister j_farg0 = xmm0;
constexpr XMMRegister j_farg1 = xmm1;
constexpr XMMRegister j_farg2 = xmm2;
constexpr XMMRegister j_farg3 = xmm3;
constexpr XMMRegister j_farg4 = xmm4;
constexpr XMMRegister j_farg5 = xmm5;
constexpr XMMRegister j_farg6 = xmm6;
constexpr XMMRegister j_farg7 = xmm7;
REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
constexpr Register rscratch1 = r10; // volatile
constexpr Register rscratch2 = r11; // volatile
REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
constexpr Register r12_heapbase = r12; // callee-saved
constexpr Register r15_thread = r15; // callee-saved
#else
// rscratch1 will appear in 32bit code that is dead but of course must compile
@ -149,7 +149,7 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
// JSR 292
// On x86, the SP does not have to be saved when invoking method handle intrinsics
// or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
constexpr Register rbp_mh_SP_save = noreg;
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
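// A minimal sketch of the forms this abstraction covers (illustrative only; the
// scaled-index constant Address::times_8 is assumed from the surrounding assembler API):
//   Address(rbx, 16)                         // base + displacement, the form used throughout this patch
//   Address(rbx, rcx, Address::times_8, 16)  // base + index * scale + displacement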
@ -2932,7 +2932,7 @@ public:
// Set embedded opmask register specifier.
void set_embedded_opmask_register_specifier(KRegister mask) {
_embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
_embedded_opmask_register_specifier = mask->encoding() & 0x7;
}
};


@ -39,9 +39,9 @@ enum {
// registers
enum {
pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_cpu_regs_frame_map = Register::number_of_registers, // number of registers used during code emission
pd_nof_fpu_regs_frame_map = FloatRegister::number_of_registers, // number of registers used during code emission
pd_nof_xmm_regs_frame_map = XMMRegister::number_of_registers, // number of registers used during code emission
#ifdef _LP64
#define UNALLOCATED 4 // rsp, rbp, r15, r10


@ -146,7 +146,7 @@ LIR_Opr FrameMap::_caller_save_cpu_regs[] = {};
LIR_Opr FrameMap::_caller_save_fpu_regs[] = {};
LIR_Opr FrameMap::_caller_save_xmm_regs[] = {};
XMMRegister FrameMap::_xmm_regs [] = { 0, };
XMMRegister FrameMap::_xmm_regs[] = {};
XMMRegister FrameMap::nr2xmmreg(int rnr) {
assert(_init_done, "tables not initialized");


@ -153,7 +153,7 @@
}
static int get_num_caller_save_xmms() {
return XMMRegisterImpl::available_xmm_registers();
return XMMRegister::available_xmm_registers();
}
static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); }


@ -1953,7 +1953,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register newval = op->new_value()->as_register();
Register cmpval = op->cmp_value()->as_register();
assert(cmpval == rax, "wrong register");
assert(newval != NULL, "new val must be register");
assert(newval != noreg, "new val must be register");
assert(cmpval != newval, "cmp and new values must be in different registers");
assert(cmpval != addr, "cmp and addr must be in different registers");
assert(newval != addr, "new value and addr must be in different registers");
@ -1984,7 +1984,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register newval = op->new_value()->as_register_lo();
Register cmpval = op->cmp_value()->as_register_lo();
assert(cmpval == rax, "wrong register");
assert(newval != NULL, "new val must be register");
assert(newval != noreg, "new val must be register");
assert(cmpval != newval, "cmp and new values must be in different registers");
assert(cmpval != addr, "cmp and addr must be in different registers");
assert(newval != addr, "new value and addr must be in different registers");


@ -101,7 +101,7 @@ inline void LinearScan::pd_add_temps(LIR_Op* op) {
// Implementation of LinearScanWalker
inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
int last_xmm_reg = pd_first_xmm_reg + XMMRegisterImpl::available_xmm_registers() - 1;
int last_xmm_reg = pd_first_xmm_reg + XMMRegister::available_xmm_registers() - 1;
if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
_first_reg = pd_first_byte_reg;


@ -43,11 +43,11 @@ void Compile::pd_compiler2_init() {
#endif // AMD64
if (UseAVX < 3) {
int delta = XMMRegisterImpl::max_slots_per_register * XMMRegisterImpl::number_of_registers;
int delta = XMMRegister::max_slots_per_register * XMMRegister::number_of_registers;
int bottom = ConcreteRegisterImpl::max_fpr;
int top = bottom + delta;
int middle = bottom + (delta / 2);
int xmm_slots = XMMRegisterImpl::max_slots_per_register;
int xmm_slots = XMMRegister::max_slots_per_register;
int lower = xmm_slots / 2;
// mark bad every register that we cannot get to if AVX less than 3, we have all slots in the array
// Note: vm2opto is allocated to ConcreteRegisterImpl::number_of_registers


@ -41,7 +41,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
Register tmp2 = _tmp2->as_register();
Register result = result_opr()->as_register();
assert(cmpval == rax, "wrong register");
assert(newval != NULL, "new val must be register");
assert(newval != noreg, "new val must be register");
assert(cmpval != newval, "cmp and new values must be in different registers");
assert(cmpval != addr, "cmp and addr must be in different registers");
assert(newval != addr, "new value and addr must be in different registers");


@ -814,7 +814,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
__ bind(L_failure);
__ bind(L_success);
} else {
assert(res != NULL, "need result register");
assert(res != noreg, "need result register");
Label exit;
__ bind(L_failure);


@ -62,7 +62,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
);


@ -82,7 +82,7 @@ instruct compareAndExchangeN_shenandoah(memory mem_ptr,
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
);
@ -104,7 +104,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
);


@ -210,11 +210,11 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) {
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, JVMCI_TRAPS) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
if (jvmci_reg < Register::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {
jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
if (floatRegisterNumber < XMMRegisterImpl::number_of_registers) {
jint floatRegisterNumber = jvmci_reg - Register::number_of_registers;
if (floatRegisterNumber < XMMRegister::number_of_registers) {
return as_XMMRegister(floatRegisterNumber)->as_VMReg();
}
JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);


@ -3772,7 +3772,7 @@ RegSet MacroAssembler::call_clobbered_gp_registers() {
}
XMMRegSet MacroAssembler::call_clobbered_xmm_registers() {
int num_xmm_registers = XMMRegisterImpl::available_xmm_registers();
int num_xmm_registers = XMMRegister::available_xmm_registers();
#if defined(WINDOWS) && defined(_LP64)
XMMRegSet result = XMMRegSet::range(xmm0, xmm5);
if (num_xmm_registers > 16) {
@ -3813,7 +3813,7 @@ static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister r
int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu,
int& gp_area_size, int& fp_area_size, int& xmm_area_size) {
gp_area_size = align_up(gp_registers.size() * RegisterImpl::max_slots_per_register * VMRegImpl::stack_slot_size,
gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
StackAlignmentInBytes);
#ifdef _LP64
fp_area_size = 0;
@ -3906,7 +3906,7 @@ void MacroAssembler::pop_set(XMMRegSet set, int offset) {
void MacroAssembler::push_set(RegSet set, int offset) {
int spill_offset;
if (offset == -1) {
int register_push_size = set.size() * RegisterImpl::max_slots_per_register * VMRegImpl::stack_slot_size;
int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size;
int aligned_size = align_up(register_push_size, StackAlignmentInBytes);
subptr(rsp, aligned_size);
spill_offset = 0;
@ -3916,13 +3916,13 @@ void MacroAssembler::push_set(RegSet set, int offset) {
for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) {
movptr(Address(rsp, spill_offset), *it);
spill_offset += RegisterImpl::max_slots_per_register * VMRegImpl::stack_slot_size;
spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size;
}
}
void MacroAssembler::pop_set(RegSet set, int offset) {
int gp_reg_size = RegisterImpl::max_slots_per_register * VMRegImpl::stack_slot_size;
int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size;
int restore_size = set.size() * gp_reg_size;
int aligned_size = align_up(restore_size, StackAlignmentInBytes);


@ -56,9 +56,9 @@ void MacroAssembler::lastroundDec(XMMRegister key, int rnum) {
}
// Load key and shuffle operation
void MacroAssembler::ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
void MacroAssembler::ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask) {
movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != NULL) {
if (xmm_shuf_mask != xnoreg) {
pshufb(xmmdst, xmm_shuf_mask);
} else {
pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));


@ -531,12 +531,12 @@ void trace_method_handle_stub(const char* adaptername,
ResourceMark rm;
LogStream ls(lt);
ls.print_cr("Registers:");
const int saved_regs_count = RegisterImpl::number_of_registers;
const int saved_regs_count = Register::number_of_registers;
for (int i = 0; i < saved_regs_count; i++) {
Register r = as_Register(i);
// The registers are stored in reverse order on the stack (by pusha).
#ifdef AMD64
assert(RegisterImpl::number_of_registers == 16, "sanity");
assert(Register::number_of_registers == 16, "sanity");
if (r == rsp) {
// rsp is actually not stored by pusha(), compute the old rsp from saved_regs (rsp after pusha): saved_regs + 16 = old rsp
ls.print("%3s=" PTR_FORMAT, r->name(), (intptr_t)(&saved_regs[16]));


@ -29,8 +29,8 @@
address RegisterMap::pd_location(VMReg reg) const {
if (reg->is_XMMRegister()) {
int reg_base = reg->value() - ConcreteRegisterImpl::max_fpr;
int base_reg_enc = (reg_base / XMMRegisterImpl::max_slots_per_register);
assert(base_reg_enc >= 0 && base_reg_enc < XMMRegisterImpl::number_of_registers, "invalid XMMRegister: %d", base_reg_enc);
int base_reg_enc = (reg_base / XMMRegister::max_slots_per_register);
assert(base_reg_enc >= 0 && base_reg_enc < XMMRegister::number_of_registers, "invalid XMMRegister: %d", base_reg_enc);
VMReg base_reg = as_XMMRegister(base_reg_enc)->as_VMReg();
intptr_t offset_in_bytes = (reg->value() - base_reg->value()) * VMRegImpl::stack_slot_size;
if (base_reg_enc > 15) {


@ -26,81 +26,43 @@
#include "register_x86.hpp"
REGISTER_IMPL_DEFINITION(Register, RegisterImpl, RegisterImpl::number_of_registers);
REGISTER_IMPL_DEFINITION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers);
REGISTER_IMPL_DEFINITION(XMMRegister, XMMRegisterImpl, XMMRegisterImpl::number_of_registers);
REGISTER_IMPL_DEFINITION(KRegister, KRegisterImpl, KRegisterImpl::number_of_registers);
Register::RegisterImpl all_RegisterImpls [Register::number_of_registers + 1];
FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1];
XMMRegister::XMMRegisterImpl all_XMMRegisterImpls [XMMRegister::number_of_registers + 1];
KRegister::KRegisterImpl all_KRegisterImpls [KRegister::number_of_registers + 1];
#ifndef AMD64
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers;
#else
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1;
#endif // AMD64
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
2 * FloatRegisterImpl::number_of_registers;
const int ConcreteRegisterImpl::max_xmm = ConcreteRegisterImpl::max_fpr +
XMMRegisterImpl::max_slots_per_register * XMMRegisterImpl::number_of_registers;
const int ConcreteRegisterImpl::max_kpr = ConcreteRegisterImpl::max_xmm +
KRegisterImpl::max_slots_per_register * KRegisterImpl::number_of_registers;
const char* RegisterImpl::name() const {
const char * Register::RegisterImpl::name() const {
static const char *const names[number_of_registers] = {
#ifndef AMD64
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
#else
#ifdef _LP64
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif // AMD64
#else
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
#endif // _LP64
};
return is_valid() ? names[encoding()] : "noreg";
}
const char* FloatRegisterImpl::name() const {
const char* FloatRegister::FloatRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7"
};
return is_valid() ? names[encoding()] : "noreg";
return is_valid() ? names[encoding()] : "fnoreg";
}
const char* XMMRegisterImpl::name() const {
const char* XMMRegister::XMMRegisterImpl::name() const {
static const char *const names[number_of_registers] = {
"xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7"
#ifdef AMD64
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
#ifdef _LP64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
,"xmm16", "xmm17", "xmm18", "xmm19", "xmm20", "xmm21", "xmm22", "xmm23"
,"xmm24", "xmm25", "xmm26", "xmm27", "xmm28", "xmm29", "xmm30", "xmm31"
#endif // AMD64
#endif // _LP64
};
return is_valid() ? names[encoding()] : "xnoreg";
}
const char* XMMRegisterImpl::sub_word_name(int i) const {
const char* names[number_of_registers * 8] = {
"xmm0:0", "xmm0:1", "xmm0:2", "xmm0:3", "xmm0:4", "xmm0:5", "xmm0:6", "xmm0:7",
"xmm1:0", "xmm1:1", "xmm1:2", "xmm1:3", "xmm1:4", "xmm1:5", "xmm1:6", "xmm1:7",
"xmm2:0", "xmm2:1", "xmm2:2", "xmm2:3", "xmm2:4", "xmm2:5", "xmm2:6", "xmm2:7",
"xmm3:0", "xmm3:1", "xmm3:2", "xmm3:3", "xmm3:4", "xmm3:5", "xmm3:6", "xmm3:7",
"xmm4:0", "xmm4:1", "xmm4:2", "xmm4:3", "xmm4:4", "xmm4:5", "xmm4:6", "xmm4:7",
"xmm5:0", "xmm5:1", "xmm5:2", "xmm5:3", "xmm5:4", "xmm5:5", "xmm5:6", "xmm5:7",
"xmm6:0", "xmm6:1", "xmm6:2", "xmm6:3", "xmm6:4", "xmm6:5", "xmm6:6", "xmm6:7",
"xmm7:0", "xmm7:1", "xmm7:2", "xmm7:3", "xmm7:4", "xmm7:5", "xmm7:6", "xmm7:7",
#ifdef AMD64
"xmm8:0", "xmm8:1", "xmm8:2", "xmm8:3", "xmm8:4", "xmm8:5", "xmm8:6", "xmm8:7",
"xmm9:0", "xmm9:1", "xmm9:2", "xmm9:3", "xmm9:4", "xmm9:5", "xmm9:6", "xmm9:7",
"xmm10:0", "xmm10:1", "xmm10:2", "xmm10:3", "xmm10:4", "xmm10:5", "xmm10:6", "xmm10:7",
"xmm11:0", "xmm11:1", "xmm11:2", "xmm11:3", "xmm11:4", "xmm11:5", "xmm11:6", "xmm11:7",
"xmm12:0", "xmm12:1", "xmm12:2", "xmm12:3", "xmm12:4", "xmm12:5", "xmm12:6", "xmm12:7",
"xmm13:0", "xmm13:1", "xmm13:2", "xmm13:3", "xmm13:4", "xmm13:5", "xmm13:6", "xmm13:7",
"xmm14:0", "xmm14:1", "xmm14:2", "xmm14:3", "xmm14:4", "xmm14:5", "xmm14:6", "xmm14:7",
"xmm15:0", "xmm15:1", "xmm15:2", "xmm15:3", "xmm15:4", "xmm15:5", "xmm15:6", "xmm15:7",
#endif // AMD64
};
assert(i >= 0 && i < 8, "offset too large");
return is_valid() ? names[encoding() * 8 + i] : "xnoreg";
}
const char* KRegisterImpl::name() const {
const char* KRegister::KRegisterImpl::name() const {
const char* names[number_of_registers] = {
"k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7"
};


@ -33,208 +33,271 @@
class VMRegImpl;
typedef VMRegImpl* VMReg;
// Use Register as shortcut
class RegisterImpl;
typedef RegisterImpl* Register;
// The implementation of integer registers for the x86/x64 architectures.
class Register {
private:
int _encoding;
// The implementation of integer registers for the ia32 architecture
inline constexpr Register as_Register(int encoding);
class RegisterImpl: public AbstractRegisterImpl {
static constexpr Register first();
constexpr Register(int encoding, bool unused) : _encoding(encoding) {}
public:
inline friend constexpr Register as_Register(int encoding);
enum {
#ifndef AMD64
number_of_registers = 8,
number_of_byte_registers = 4,
max_slots_per_register = 1
#else
number_of_registers = 16,
number_of_byte_registers = 16,
max_slots_per_register = 2
#endif // AMD64
number_of_registers = LP64_ONLY( 16 ) NOT_LP64( 8 ),
number_of_byte_registers = LP64_ONLY( 16 ) NOT_LP64( 4 ),
max_slots_per_register = LP64_ONLY( 2 ) NOT_LP64( 1 )
};
// derived registers, offsets, and addresses
Register successor() const { return as_Register(encoding() + 1); }
class RegisterImpl: public AbstractRegisterImpl {
friend class Register;
// construction
inline constexpr friend Register as_Register(int encoding);
static constexpr RegisterImpl* first();
inline VMReg as_VMReg() const;
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
bool has_byte_register() const { return 0 <= raw_encoding() && raw_encoding() < number_of_byte_registers; }
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
bool has_byte_register() const { return 0 <= raw_encoding() && raw_encoding() < number_of_byte_registers; }
const char *name() const;
// derived registers, offsets, and addresses
inline Register successor() const;
inline VMReg as_VMReg() const;
const char* name() const;
};
constexpr Register() : _encoding(-1) {} // noreg
int operator==(const Register r) const { return _encoding == r._encoding; }
int operator!=(const Register r) const { return _encoding != r._encoding; }
const RegisterImpl* operator->() const { return RegisterImpl::first() + _encoding; }
};
REGISTER_IMPL_DECLARATION(Register, RegisterImpl, RegisterImpl::number_of_registers);
extern Register::RegisterImpl all_RegisterImpls[Register::number_of_registers + 1] INTERNAL_VISIBILITY;
// The integer registers of the ia32/amd64 architecture
inline constexpr Register::RegisterImpl* Register::RegisterImpl::first() {
return all_RegisterImpls + 1;
}
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
constexpr Register noreg = Register();
inline constexpr Register as_Register(int encoding) {
if (0 <= encoding && encoding < Register::number_of_registers) {
return Register(encoding, false);
}
return noreg;
}
inline Register Register::RegisterImpl::successor() const {
assert(is_valid(), "sanity");
return as_Register(encoding() + 1);
}
constexpr Register rax = as_Register(0);
constexpr Register rcx = as_Register(1);
constexpr Register rdx = as_Register(2);
constexpr Register rbx = as_Register(3);
constexpr Register rsp = as_Register(4);
constexpr Register rbp = as_Register(5);
constexpr Register rsi = as_Register(6);
constexpr Register rdi = as_Register(7);
#ifdef _LP64
constexpr Register r8 = as_Register( 8);
constexpr Register r9 = as_Register( 9);
constexpr Register r10 = as_Register(10);
constexpr Register r11 = as_Register(11);
constexpr Register r12 = as_Register(12);
constexpr Register r13 = as_Register(13);
constexpr Register r14 = as_Register(14);
constexpr Register r15 = as_Register(15);
#endif // _LP64
CONSTANT_REGISTER_DECLARATION(Register, rax, (0));
CONSTANT_REGISTER_DECLARATION(Register, rcx, (1));
CONSTANT_REGISTER_DECLARATION(Register, rdx, (2));
CONSTANT_REGISTER_DECLARATION(Register, rbx, (3));
CONSTANT_REGISTER_DECLARATION(Register, rsp, (4));
CONSTANT_REGISTER_DECLARATION(Register, rbp, (5));
CONSTANT_REGISTER_DECLARATION(Register, rsi, (6));
CONSTANT_REGISTER_DECLARATION(Register, rdi, (7));
#ifdef AMD64
CONSTANT_REGISTER_DECLARATION(Register, r8, (8));
CONSTANT_REGISTER_DECLARATION(Register, r9, (9));
CONSTANT_REGISTER_DECLARATION(Register, r10, (10));
CONSTANT_REGISTER_DECLARATION(Register, r11, (11));
CONSTANT_REGISTER_DECLARATION(Register, r12, (12));
CONSTANT_REGISTER_DECLARATION(Register, r13, (13));
CONSTANT_REGISTER_DECLARATION(Register, r14, (14));
CONSTANT_REGISTER_DECLARATION(Register, r15, (15));
#endif // AMD64
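// A brief sketch of the resulting value semantics (illustrative only; it uses
// nothing beyond the members declared above):
//   Register r = rax;
//   assert(r != noreg, "sanity");
//   int enc = r->encoding();         // operator-> forwards to RegisterImpl, so enc == 0
//   const char* n = r->name();       // "rax"
//   Register next = r->successor();  // rcx
//   Register copy = r;               // plain value copy; no pointer identity involved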
// The implementation of x87 floating point registers for the ia32 architecture.
class FloatRegister {
private:
int _encoding;
// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef const FloatRegisterImpl* FloatRegister;
inline constexpr FloatRegister as_FloatRegister(int encoding);
// The implementation of floating point registers for the ia32 architecture
class FloatRegisterImpl: public AbstractRegisterImpl {
static constexpr FloatRegister first();
constexpr FloatRegister(int encoding, bool unused) : _encoding(encoding) {}
public:
enum {
number_of_registers = 8
};
// construction
inline friend constexpr FloatRegister as_FloatRegister(int encoding);
inline VMReg as_VMReg() const;
// derived registers, offsets, and addresses
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
const char* name() const;
};
REGISTER_IMPL_DECLARATION(FloatRegister, FloatRegisterImpl, FloatRegisterImpl::number_of_registers);
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
// Use XMMRegister as shortcut
class XMMRegisterImpl;
typedef XMMRegisterImpl* XMMRegister;
inline constexpr XMMRegister as_XMMRegister(int encoding);
// The implementation of XMM registers.
class XMMRegisterImpl: public AbstractRegisterImpl {
static constexpr XMMRegister first();
public:
enum {
#ifndef AMD64
number_of_registers = 8,
max_slots_per_register = 16 // 512-bit
#else
number_of_registers = 32,
max_slots_per_register = 16 // 512-bit
#endif // AMD64
number_of_registers = 8,
max_slots_per_register = 2
};
// construction
friend constexpr XMMRegister as_XMMRegister(int encoding);
class FloatRegisterImpl: public AbstractRegisterImpl {
friend class FloatRegister;
inline VMReg as_VMReg() const;
static constexpr FloatRegisterImpl* first();
// derived registers, offsets, and addresses
XMMRegister successor() const { return as_XMMRegister(encoding() + 1); }
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
const char* name() const;
const char* sub_word_name(int offset) const;
// derived registers, offsets, and addresses
inline FloatRegister successor() const;
// Actually available XMM registers for use, depending on actual CPU capabilities
// and flags.
inline VMReg as_VMReg() const;
const char* name() const;
};
constexpr FloatRegister() : _encoding(-1) {} // fnoreg
int operator==(const FloatRegister r) const { return _encoding == r._encoding; }
int operator!=(const FloatRegister r) const { return _encoding != r._encoding; }
const FloatRegisterImpl* operator->() const { return FloatRegisterImpl::first() + _encoding; }
};
extern FloatRegister::FloatRegisterImpl all_FloatRegisterImpls[FloatRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
inline constexpr FloatRegister::FloatRegisterImpl* FloatRegister::FloatRegisterImpl::first() {
return all_FloatRegisterImpls + 1;
}
constexpr FloatRegister fnoreg = FloatRegister();
inline constexpr FloatRegister as_FloatRegister(int encoding) {
if (0 <= encoding && encoding < FloatRegister::number_of_registers) {
return FloatRegister(encoding, false);
}
return fnoreg;
}
inline FloatRegister FloatRegister::FloatRegisterImpl::successor() const {
assert(is_valid(), "sanity");
return as_FloatRegister(encoding() + 1);
}
// The implementation of XMM registers.
class XMMRegister {
private:
int _encoding;
constexpr XMMRegister(int encoding, bool unused) : _encoding(encoding) {}
public:
inline friend constexpr XMMRegister as_XMMRegister(int encoding);
enum {
number_of_registers = LP64_ONLY( 32 ) NOT_LP64( 8 ),
max_slots_per_register = LP64_ONLY( 16 ) NOT_LP64( 16 ) // 512-bit
};
class XMMRegisterImpl: public AbstractRegisterImpl {
friend class XMMRegister;
static constexpr XMMRegisterImpl* first();
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline XMMRegister successor() const;
inline VMReg as_VMReg() const;
const char* name() const;
};
constexpr XMMRegister() : _encoding(-1) {} // xnoreg
int operator==(const XMMRegister r) const { return _encoding == r._encoding; }
int operator!=(const XMMRegister r) const { return _encoding != r._encoding; }
const XMMRegisterImpl* operator->() const { return XMMRegisterImpl::first() + _encoding; }
// Actually available XMM registers for use, depending on actual CPU capabilities and flags.
static int available_xmm_registers() {
int num_xmm_regs = XMMRegisterImpl::number_of_registers;
#ifdef _LP64
if (UseAVX < 3) {
num_xmm_regs /= 2;
return number_of_registers / 2;
}
#endif
return num_xmm_regs;
#endif // _LP64
return number_of_registers;
}
};
extern XMMRegister::XMMRegisterImpl all_XMMRegisterImpls[XMMRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
REGISTER_IMPL_DECLARATION(XMMRegister, XMMRegisterImpl, XMMRegisterImpl::number_of_registers);
inline constexpr XMMRegister::XMMRegisterImpl* XMMRegister::XMMRegisterImpl::first() {
return all_XMMRegisterImpls + 1;
}
// The XMM registers, for P3 and up chips
CONSTANT_REGISTER_DECLARATION(XMMRegister, xnoreg , (-1));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm0 , ( 0));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm1 , ( 1));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm2 , ( 2));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm3 , ( 3));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm4 , ( 4));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm5 , ( 5));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm6 , ( 6));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm7 , ( 7));
#ifdef AMD64
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm8, (8));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm9, (9));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm10, (10));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm11, (11));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm12, (12));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm13, (13));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm14, (14));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm15, (15));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm16, (16));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm17, (17));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm18, (18));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm19, (19));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm20, (20));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm21, (21));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm22, (22));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm23, (23));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm24, (24));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm25, (25));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm26, (26));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm27, (27));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm28, (28));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm29, (29));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm30, (30));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm31, (31));
#endif // AMD64
constexpr XMMRegister xnoreg = XMMRegister();
// Use KRegister as shortcut
class KRegisterImpl;
typedef KRegisterImpl* KRegister;
inline constexpr XMMRegister as_XMMRegister(int encoding) {
if (0 <= encoding && encoding < XMMRegister::number_of_registers) {
return XMMRegister(encoding, false);
}
return xnoreg;
}
inline constexpr KRegister as_KRegister(int encoding);
inline XMMRegister XMMRegister::XMMRegisterImpl::successor() const {
assert(is_valid(), "sanity");
return as_XMMRegister(encoding() + 1);
}
// The implementation of AVX-3 (AVX-512) opmask registers.
class KRegisterImpl : public AbstractRegisterImpl {
static constexpr KRegister first();
constexpr XMMRegister xmm0 = as_XMMRegister( 0);
constexpr XMMRegister xmm1 = as_XMMRegister( 1);
constexpr XMMRegister xmm2 = as_XMMRegister( 2);
constexpr XMMRegister xmm3 = as_XMMRegister( 3);
constexpr XMMRegister xmm4 = as_XMMRegister( 4);
constexpr XMMRegister xmm5 = as_XMMRegister( 5);
constexpr XMMRegister xmm6 = as_XMMRegister( 6);
constexpr XMMRegister xmm7 = as_XMMRegister( 7);
#ifdef _LP64
constexpr XMMRegister xmm8 = as_XMMRegister( 8);
constexpr XMMRegister xmm9 = as_XMMRegister( 9);
constexpr XMMRegister xmm10 = as_XMMRegister(10);
constexpr XMMRegister xmm11 = as_XMMRegister(11);
constexpr XMMRegister xmm12 = as_XMMRegister(12);
constexpr XMMRegister xmm13 = as_XMMRegister(13);
constexpr XMMRegister xmm14 = as_XMMRegister(14);
constexpr XMMRegister xmm15 = as_XMMRegister(15);
constexpr XMMRegister xmm16 = as_XMMRegister(16);
constexpr XMMRegister xmm17 = as_XMMRegister(17);
constexpr XMMRegister xmm18 = as_XMMRegister(18);
constexpr XMMRegister xmm19 = as_XMMRegister(19);
constexpr XMMRegister xmm20 = as_XMMRegister(20);
constexpr XMMRegister xmm21 = as_XMMRegister(21);
constexpr XMMRegister xmm22 = as_XMMRegister(22);
constexpr XMMRegister xmm23 = as_XMMRegister(23);
constexpr XMMRegister xmm24 = as_XMMRegister(24);
constexpr XMMRegister xmm25 = as_XMMRegister(25);
constexpr XMMRegister xmm26 = as_XMMRegister(26);
constexpr XMMRegister xmm27 = as_XMMRegister(27);
constexpr XMMRegister xmm28 = as_XMMRegister(28);
constexpr XMMRegister xmm29 = as_XMMRegister(29);
constexpr XMMRegister xmm30 = as_XMMRegister(30);
constexpr XMMRegister xmm31 = as_XMMRegister(31);
#endif // _LP64
// The implementation of AVX-512 opmask registers.
class KRegister {
private:
int _encoding;
constexpr KRegister(int encoding, bool unused) : _encoding(encoding) {}
public:
inline friend constexpr KRegister as_KRegister(int encoding);
enum {
number_of_registers = 8,
// opmask registers are 64bit wide on both 32 and 64 bit targets.
@ -242,60 +305,87 @@ public:
max_slots_per_register = 2
};
// construction
friend constexpr KRegister as_KRegister(int encoding);
class KRegisterImpl: public AbstractRegisterImpl {
friend class KRegister;
inline VMReg as_VMReg() const;
static constexpr KRegisterImpl* first();
// derived registers, offsets, and addresses
KRegister successor() const { return as_KRegister(encoding() + 1); }
public:
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register (%d)", (int)raw_encoding()); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
const char* name() const;
// accessors
int raw_encoding() const { return this - first(); }
int encoding() const { assert(is_valid(), "invalid register"); return raw_encoding(); }
bool is_valid() const { return 0 <= raw_encoding() && raw_encoding() < number_of_registers; }
// derived registers, offsets, and addresses
inline KRegister successor() const;
inline VMReg as_VMReg() const;
const char* name() const;
};
constexpr KRegister() : _encoding(-1) {} // knoreg
int operator==(const KRegister r) const { return _encoding == r._encoding; }
int operator!=(const KRegister r) const { return _encoding != r._encoding; }
const KRegisterImpl* operator->() const { return KRegisterImpl::first() + _encoding; }
};
REGISTER_IMPL_DECLARATION(KRegister, KRegisterImpl, KRegisterImpl::number_of_registers);
extern KRegister::KRegisterImpl all_KRegisterImpls[KRegister::number_of_registers + 1] INTERNAL_VISIBILITY;
inline constexpr KRegister::KRegisterImpl* KRegister::KRegisterImpl::first() {
return all_KRegisterImpls + 1;
}
constexpr KRegister knoreg = KRegister();
inline constexpr KRegister as_KRegister(int encoding) {
if (0 <= encoding && encoding < KRegister::number_of_registers) {
return KRegister(encoding, false);
}
return knoreg;
}
inline KRegister KRegister::KRegisterImpl::successor() const {
assert(is_valid(), "sanity");
return as_KRegister(encoding() + 1);
}
constexpr KRegister k0 = as_KRegister(0);
constexpr KRegister k1 = as_KRegister(1);
constexpr KRegister k2 = as_KRegister(2);
constexpr KRegister k3 = as_KRegister(3);
constexpr KRegister k4 = as_KRegister(4);
constexpr KRegister k5 = as_KRegister(5);
constexpr KRegister k6 = as_KRegister(6);
constexpr KRegister k7 = as_KRegister(7);
// The Mask registers, for AVX3 enabled and up chips
CONSTANT_REGISTER_DECLARATION(KRegister, knoreg, (-1));
CONSTANT_REGISTER_DECLARATION(KRegister, k0, (0));
CONSTANT_REGISTER_DECLARATION(KRegister, k1, (1));
CONSTANT_REGISTER_DECLARATION(KRegister, k2, (2));
CONSTANT_REGISTER_DECLARATION(KRegister, k3, (3));
CONSTANT_REGISTER_DECLARATION(KRegister, k4, (4));
CONSTANT_REGISTER_DECLARATION(KRegister, k5, (5));
CONSTANT_REGISTER_DECLARATION(KRegister, k6, (6));
CONSTANT_REGISTER_DECLARATION(KRegister, k7, (7));
// Need to know the total number of registers of all sorts for SharedInfo.
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
max_gpr = Register::number_of_registers * Register::max_slots_per_register,
max_fpr = max_gpr + FloatRegister::number_of_registers * FloatRegister::max_slots_per_register,
max_xmm = max_fpr + XMMRegister::number_of_registers * XMMRegister::max_slots_per_register,
max_kpr = max_xmm + KRegister::number_of_registers * KRegister::max_slots_per_register,
// x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
// REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
// with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
// added for 32 bit jvm.
number_of_registers = RegisterImpl::number_of_registers * RegisterImpl::max_slots_per_register +
2 * FloatRegisterImpl::number_of_registers + NOT_LP64(8) LP64_ONLY(0) +
XMMRegisterImpl::max_slots_per_register * XMMRegisterImpl::number_of_registers +
KRegisterImpl::number_of_registers * KRegisterImpl::max_slots_per_register + // mask registers
1 // eflags
// A big enough number for C2: all the registers plus flags
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
// it's optoregs.
// x86_32.ad defines additional dummy FILL0-FILL7 registers, in order to tally
// REG_COUNT (computed by ADLC based on the number of reg_defs seen in .ad files)
// with ConcreteRegisterImpl::number_of_registers additional count of 8 is being
// added for 32 bit jvm.
number_of_registers = max_kpr + // gpr/fpr/xmm/kpr
NOT_LP64( 8 + ) // FILL0-FILL7 in x86_32.ad
1 // eflags
};
static const int max_gpr;
static const int max_fpr;
static const int max_xmm;
static const int max_kpr;
};
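// As a worked check of these definitions (using the enum values shown earlier in this
// header): on 64-bit, max_gpr = 16 * 2 = 32, max_fpr = 32 + 8 * 2 = 48,
// max_xmm = 48 + 32 * 16 = 560, max_kpr = 560 + 8 * 2 = 576, and
// number_of_registers = 576 + 1 (eflags) = 577. The 32-bit build uses 8 registers of
// each kind plus the 8 FILL registers, giving 8 + 16 + 128 + 16 + 8 + 1 = 177.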
template <>


@ -128,11 +128,11 @@ class RegisterSaver {
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
int* total_frame_words, bool verify_fpu, bool save_vectors) {
int num_xmm_regs = XMMRegisterImpl::number_of_registers;
int num_xmm_regs = XMMRegister::number_of_registers;
int ymm_bytes = num_xmm_regs * 16;
int zmm_bytes = num_xmm_regs * 32;
#ifdef COMPILER2
int opmask_state_bytes = KRegisterImpl::number_of_registers * 8;
int opmask_state_bytes = KRegister::number_of_registers * 8;
if (save_vectors) {
assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
@ -199,7 +199,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
int delta = st1_off - off;
// Save the FPU registers in de-opt-able form
for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < FloatRegister::number_of_registers; n++) {
__ fstp_d(Address(rsp, off*wordSize));
off += delta;
}
@ -235,7 +235,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
__ subptr(rsp, opmask_state_bytes);
// Save opmask registers
for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(Address(rsp, n*8), as_KRegister(n));
}
}
@ -268,7 +268,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// %%% This is really a waste but we'll keep things as they were for now for the upper component
off = st0_off;
delta = st1_off - off;
for (int n = 0; n < FloatRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < FloatRegister::number_of_registers; n++) {
FloatRegister freg_name = as_FloatRegister(n);
map->set_callee_saved(STACK_OFFSET(off), freg_name->as_VMReg());
map->set_callee_saved(STACK_OFFSET(off+1), NEXTREG(freg_name));
@ -291,7 +291,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
int opmask_state_bytes = 0;
int additional_frame_bytes = 0;
int num_xmm_regs = XMMRegisterImpl::number_of_registers;
int num_xmm_regs = XMMRegister::number_of_registers;
int ymm_bytes = num_xmm_regs * 16;
int zmm_bytes = num_xmm_regs * 32;
// Recover XMM & FPU state
@ -304,7 +304,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
if (UseAVX > 2) {
// Save upper half of ZMM registers as well
additional_frame_bytes += zmm_bytes;
opmask_state_bytes = KRegisterImpl::number_of_registers * 8;
opmask_state_bytes = KRegister::number_of_registers * 8;
additional_frame_bytes += opmask_state_bytes;
}
}
@ -345,7 +345,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
for (int n = 0; n < num_xmm_regs; n++) {
__ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, n*32+off));
}
for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(as_KRegister(n), Address(rsp, n*8));
}
}
@ -412,8 +412,8 @@ static int reg2offset_out(VMReg r) {
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp).
// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register
// up to RegisterImpl::number_of_registers) are the 32-bit
// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
// Register up to Register::number_of_registers are the 32-bit
// integer registers.
// Pass first two oop/int args in registers ECX and EDX.


@ -174,7 +174,7 @@ class RegisterSaver {
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
int off = 0;
int num_xmm_regs = XMMRegisterImpl::available_xmm_registers();
int num_xmm_regs = XMMRegister::available_xmm_registers();
#if COMPILER2_OR_JVMCI
if (save_wide_vectors && UseAVX == 0) {
save_wide_vectors = false; // vectors larger than 16 byte long are supported only with AVX
@ -224,7 +224,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
#if COMPILER2_OR_JVMCI
base_addr = XSAVE_AREA_OPMASK_BEGIN;
off = 0;
for(int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for(int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(Address(rsp, base_addr+(off++*8)), as_KRegister(n));
}
#endif
@ -241,7 +241,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
#if COMPILER2_OR_JVMCI
base_addr = XSAVE_AREA_OPMASK_BEGIN;
off = 0;
for(int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for(int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(Address(rsp, base_addr+(off++*8)), as_KRegister(n));
}
#endif
@ -364,7 +364,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors) {
int num_xmm_regs = XMMRegisterImpl::available_xmm_registers();
int num_xmm_regs = XMMRegister::available_xmm_registers();
if (frame::arg_reg_save_area_bytes != 0) {
// Pop arg register save area
__ addptr(rsp, frame::arg_reg_save_area_bytes);
@ -404,7 +404,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_wi
#if COMPILER2_OR_JVMCI
base_addr = XSAVE_AREA_OPMASK_BEGIN;
off = 0;
for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(as_KRegister(n), Address(rsp, base_addr+(off++*8)));
}
#endif
@ -421,7 +421,7 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_wi
#if COMPILER2_OR_JVMCI
base_addr = XSAVE_AREA_OPMASK_BEGIN;
off = 0;
for (int n = 0; n < KRegisterImpl::number_of_registers; n++) {
for (int n = 0; n < KRegister::number_of_registers; n++) {
__ kmov(as_KRegister(n), Address(rsp, base_addr+(off++*8)));
}
#endif
@ -465,8 +465,8 @@ bool SharedRuntime::is_wide_vector(int size) {
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp).
// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register
// up to RegisterImpl::number_of_registers) are the 64-bit
// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher.
// Register up to Register::number_of_registers are the 64-bit
// integer registers.
// Note: the INPUTS in sig_bt are in units of Java argument words, which are
@ -1807,12 +1807,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#ifdef ASSERT
bool reg_destroyed[RegisterImpl::number_of_registers];
bool freg_destroyed[XMMRegisterImpl::number_of_registers];
for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
bool reg_destroyed[Register::number_of_registers];
bool freg_destroyed[XMMRegister::number_of_registers];
for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
reg_destroyed[r] = false;
}
for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
for ( int f = 0 ; f < XMMRegister::number_of_registers ; f++ ) {
freg_destroyed[f] = false;
}


@ -2204,9 +2204,9 @@ class StubGenerator: public StubCodeGenerator {
// Utility routine for loading a 128-bit key word in little endian format
// can optionally specify that the shuffle mask is already in an xmmregister
void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) {
__ movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != NULL) {
if (xmm_shuf_mask != xnoreg) {
__ pshufb(xmmdst, xmm_shuf_mask);
} else {
__ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
@ -2215,14 +2215,14 @@ class StubGenerator: public StubCodeGenerator {
// aesenc using specified key+offset
// can optionally specify that the shuffle mask is already in an xmmregister
void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) {
load_key(xmmtmp, key, offset, xmm_shuf_mask);
__ aesenc(xmmdst, xmmtmp);
}
// aesdec using specified key+offset
// can optionally specify that the shuffle mask is already in an xmmregister
void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) {
load_key(xmmtmp, key, offset, xmm_shuf_mask);
__ aesdec(xmmdst, xmmtmp);
}
@ -3506,9 +3506,9 @@ class StubGenerator: public StubCodeGenerator {
const Register d = rbx;
const Register g = rsi;
const Register h = rdi;
const Register empty = 0; // will never be used, in order not
// to change a signature for crc32c_IPL_Alg2_Alt2
// between 64/32 I'm just keeping it here
const Register empty = noreg; // will never be used, in order not
// to change a signature for crc32c_IPL_Alg2_Alt2
// between 64/32 I'm just keeping it here
assert_different_registers(crc, buf, len, d, g, h);
BLOCK_COMMENT("Entry:");


@ -3566,9 +3566,9 @@ class StubGenerator: public StubCodeGenerator {
// Utility routine for loading a 128-bit key word in little endian format
// can optionally specify that the shuffle mask is already in an xmmregister
void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) {
__ movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != NULL) {
if (xmm_shuf_mask != xnoreg) {
__ pshufb(xmmdst, xmm_shuf_mask);
} else {
__ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
@ -4974,16 +4974,16 @@ void roundDeclast(XMMRegister xmm_reg) {
__ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
}
void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) {
__ movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != NULL) {
__ pshufb(xmmdst, xmm_shuf_mask);
} else {
__ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
}
__ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = xnoreg) {
__ movdqu(xmmdst, Address(key, offset));
if (xmm_shuf_mask != xnoreg) {
__ pshufb(xmmdst, xmm_shuf_mask);
} else {
__ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
}
__ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
}
address generate_cipherBlockChaining_decryptVectorAESCrypt() {
assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support");


@ -48,7 +48,7 @@ void VMRegImpl::set_regName() {
XMMRegister xreg = ::as_XMMRegister(0);
for (; i < ConcreteRegisterImpl::max_xmm;) {
for (int j = 0 ; j < XMMRegisterImpl::max_slots_per_register ; j++) {
for (int j = 0 ; j < XMMRegister::max_slots_per_register ; j++) {
regName[i++] = xreg->name();
}
xreg = xreg->successor();
@ -56,7 +56,7 @@ void VMRegImpl::set_regName() {
KRegister kreg = ::as_KRegister(0);
for (; i < ConcreteRegisterImpl::max_kpr;) {
for (int j = 0; j < KRegisterImpl::max_slots_per_register; j++) {
for (int j = 0; j < KRegister::max_slots_per_register; j++) {
regName[i++] = kreg->name();
}
kreg = kreg->successor();


@ -37,7 +37,7 @@ inline bool is_FloatRegister() {
inline bool is_XMMRegister() {
int uarch_max_xmm = ConcreteRegisterImpl::max_fpr +
(XMMRegisterImpl::max_slots_per_register * XMMRegisterImpl::available_xmm_registers());
(XMMRegister::max_slots_per_register * XMMRegister::available_xmm_registers());
return (value() >= ConcreteRegisterImpl::max_fpr && value() < uarch_max_xmm);
}
@ -87,7 +87,7 @@ inline bool is_concrete() {
// Do not use is_XMMRegister() here as it depends on the UseAVX setting.
if (value() >= ConcreteRegisterImpl::max_fpr && value() < ConcreteRegisterImpl::max_xmm) {
int base = value() - ConcreteRegisterImpl::max_fpr;
return base % XMMRegisterImpl::max_slots_per_register == 0;
return (base % XMMRegister::max_slots_per_register) == 0;
} else {
return is_even(value()); // General, float, and K registers are all two slots wide
}


@ -25,24 +25,19 @@
#ifndef CPU_X86_VMREG_X86_INLINE_HPP
#define CPU_X86_VMREG_X86_INLINE_HPP
inline VMReg RegisterImpl::as_VMReg() const {
if( this==noreg ) return VMRegImpl::Bad();
#ifdef AMD64
return VMRegImpl::as_VMReg(encoding() << 1 );
#else
return VMRegImpl::as_VMReg(encoding() );
#endif // AMD64
inline VMReg Register::RegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg(encoding() LP64_ONLY( << 1 ));
}
inline VMReg FloatRegisterImpl::as_VMReg() const {
inline VMReg FloatRegister::FloatRegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
}
inline VMReg XMMRegisterImpl::as_VMReg() const {
inline VMReg XMMRegister::XMMRegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding() << 4) + ConcreteRegisterImpl::max_fpr);
}
inline VMReg KRegisterImpl::as_VMReg() const {
inline VMReg KRegister::KRegisterImpl::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_xmm);
}


@ -255,7 +255,8 @@ source %{
#define __ _masm.
// How to find the high register of a Long pair, given the low register
#define HIGH_FROM_LOW(x) ((x)+2)
#define HIGH_FROM_LOW(x) (as_Register((x)->encoding()+2))
#define HIGH_FROM_LOW_ENC(x) ((x)+2)
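// A short sketch of the distinction between the two macros (illustrative only):
//   Register lo = rax;                   // encoding 0
//   Register hi = HIGH_FROM_LOW(lo);     // as_Register(0 + 2), i.e. rdx, the high half of the pair
//   int hi_enc  = HIGH_FROM_LOW_ENC(0);  // plain encoding arithmetic, used where only the int encoding is at hand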
// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
@ -1650,7 +1651,7 @@ encode %{
int con = (int)($imm$$constant >> 32); // Throw away bottom bits
emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
// Emit r/m byte with tertiary opcode, after primary opcode.
emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW($dst$$reg));
emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW_ENC($dst$$reg));
if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
else emit_d32(cbuf,con);
%}
@ -1661,7 +1662,7 @@ encode %{
enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP
int destlo = $dst$$reg;
int desthi = HIGH_FROM_LOW(destlo);
int desthi = HIGH_FROM_LOW_ENC(destlo);
// bswap lo
emit_opcode(cbuf, 0x0F);
emit_cc(cbuf, 0xC8, destlo);
@ -1946,7 +1947,7 @@ encode %{
enc_class RegReg_Hi(eRegL dst, eRegL src) %{ // RegReg(Many)
$$$emit8$secondary;
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_Lo2(eRegL dst, eRegL src) %{ // RegReg(Many)
@ -1954,11 +1955,11 @@ encode %{
%}
enc_class RegReg_Hi2(eRegL dst, eRegL src) %{ // RegReg(Many)
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg));
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_HiLo( eRegL src, rRegI dst ) %{
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($src$$reg));
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class Con32 (immI src) %{ // Con32(storeImmI)
@ -2083,7 +2084,7 @@ encode %{
%}
enc_class RegMem_Hi(eRegL ereg, memory mem) %{ // emit_reg_mem
int reg_encoding = HIGH_FROM_LOW($ereg$$reg); // Hi register of pair, computed from lo
int reg_encoding = HIGH_FROM_LOW_ENC($ereg$$reg); // Hi register of pair, computed from lo
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
@ -2094,8 +2095,8 @@ encode %{
enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
int r1, r2;
if( $tertiary == 0xA4 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW($dst$$reg); }
if( $tertiary == 0xA4 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,$tertiary);
emit_rm(cbuf, 0x3, r1, r2);
@ -2107,21 +2108,21 @@ encode %{
enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
emit_opcode( cbuf, 0x8B ); // Move
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
if( $cnt$$constant > 32 ) { // Shift, if not by zero
emit_d8(cbuf,$primary);
emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
emit_d8(cbuf,$cnt$$constant-32);
}
emit_d8(cbuf,$primary);
emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg));
emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW_ENC($dst$$reg));
emit_d8(cbuf,31);
%}
enc_class move_long_big_shift_clr( eRegL dst, immI_32_63 cnt ) %{
int r1, r2;
if( $secondary == 0x5 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW($dst$$reg); }
if( $secondary == 0x5 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
emit_opcode( cbuf, 0x8B ); // Move r1,r2
emit_rm(cbuf, 0x3, r1, r2);
@ -2265,7 +2266,7 @@ encode %{
emit_d8(cbuf, 0x04);
// MOV $dst.hi,$dst.lo
emit_opcode( cbuf, 0x8B );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
// CLR $dst.lo
emit_opcode(cbuf, 0x33);
emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg);
@ -2273,7 +2274,7 @@ encode %{
// SHLD $dst.hi,$dst.lo,$shift
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xA5);
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
// SHL $dst.lo,$shift"
emit_opcode(cbuf,0xD3);
emit_rm(cbuf, 0x3, 0x4, $dst$$reg );
@ -2289,18 +2290,18 @@ encode %{
emit_d8(cbuf, 0x04);
// MOV $dst.lo,$dst.hi
emit_opcode( cbuf, 0x8B );
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// CLR $dst.hi
emit_opcode(cbuf, 0x33);
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($dst$$reg));
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($dst$$reg));
// small:
// SHRD $dst.lo,$dst.hi,$shift
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xAD);
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SHR $dst.hi,$shift"
emit_opcode(cbuf,0xD3);
emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW($dst$$reg) );
emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
enc_class shift_right_arith_long( eRegL dst, eCXRegI shift ) %{
@ -2313,19 +2314,19 @@ encode %{
emit_d8(cbuf, 0x05);
// MOV $dst.lo,$dst.hi
emit_opcode( cbuf, 0x8B );
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg) );
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// SAR $dst.hi,31
emit_opcode(cbuf, 0xC1);
emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW($dst$$reg) );
emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW_ENC($dst$$reg) );
emit_d8(cbuf, 0x1F );
// small:
// SHRD $dst.lo,$dst.hi,$shift
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xAD);
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg);
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SAR $dst.hi,$shift"
emit_opcode(cbuf,0xD3);
emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW($dst$$reg) );
emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
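
The constant- and variable-shift encodings above share one shape: an optional "big shift" fix-up that moves one half into the other and fills the vacated half (with zero or the sign), followed by SHRD/SHLD plus a plain shift on the remaining half. A sketch of the arithmetic right-shift case on plain 32-bit halves (an assumption-level restatement, relying on arithmetic `>>` for negative ints as mainstream compilers provide):

```cpp
// Sketch of the pattern behind shift_right_arith_long above: a 64-bit
// arithmetic right shift done on two 32-bit halves, count in 1..63.
#include <cstdint>
#include <cstdio>

static void sar_long(uint32_t& lo, int32_t& hi, unsigned count) {
  if (count >= 32) {                   // "big" branch: MOV dst.lo,dst.hi / SAR dst.hi,31
    lo = (uint32_t)hi;
    hi = hi >> 31;                     // sign fill
  }
  unsigned c = count & 31;             // hardware shifts mask the count to 5 bits
  if (c != 0) {
    lo = (lo >> c) | ((uint32_t)hi << (32 - c));  // SHRD dst.lo, dst.hi, c
    hi = hi >> c;                                 // SAR  dst.hi, c
  }
}

int main() {
  uint32_t lo = 0; int32_t hi = INT32_MIN;        // value 0x8000000000000000
  sar_long(lo, hi, 33);
  std::printf("hi=0x%08x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);  // ffffffff c0000000
}
```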
@ -2655,7 +2656,7 @@ encode %{
enc_class cmpl_test( eRegL src1, eRegL src2 ) %{
// CMP $src1.hi,$src2.hi
emit_opcode( cbuf, 0x3B );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
// JNE,s done
emit_opcode(cbuf,0x75);
emit_d8(cbuf, 2 );
@ -2671,16 +2672,16 @@ encode %{
int src_encoding = $src$$reg;
encode_Copy( cbuf, dst_encoding , src_encoding );
// mov $dst.hi,$src
encode_Copy( cbuf, HIGH_FROM_LOW(dst_encoding), src_encoding );
encode_Copy( cbuf, HIGH_FROM_LOW_ENC(dst_encoding), src_encoding );
// sar $dst.hi,31
emit_opcode( cbuf, 0xC1 );
emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW(dst_encoding) );
emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW_ENC(dst_encoding) );
emit_d8(cbuf, 0x1F );
%}
enc_class convert_long_double( eRegL src ) %{
// push $src.hi
emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
emit_opcode(cbuf, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
// push $src.lo
emit_opcode(cbuf, 0x50+$src$$reg );
// fild 64-bits at [SP]
@ -2710,7 +2711,7 @@ encode %{
// this version doesn't have add sp, 8
enc_class convert_long_double2( eRegL src ) %{
// push $src.hi
emit_opcode(cbuf, 0x50+HIGH_FROM_LOW($src$$reg));
emit_opcode(cbuf, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
// push $src.lo
emit_opcode(cbuf, 0x50+$src$$reg );
// fild 64-bits at [SP]
@ -2742,22 +2743,22 @@ encode %{
// IMUL $tmp,EDX
emit_opcode( cbuf, 0x0F );
emit_opcode( cbuf, 0xAF );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// MOV EDX,$src.hi
encode_Copy( cbuf, HIGH_FROM_LOW($dst$$reg), HIGH_FROM_LOW($src$$reg) );
encode_Copy( cbuf, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg) );
// IMUL EDX,EAX
emit_opcode( cbuf, 0x0F );
emit_opcode( cbuf, 0xAF );
emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $dst$$reg );
emit_rm( cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
// ADD $tmp,EDX
emit_opcode( cbuf, 0x03 );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// MUL EDX:EAX,$src.lo
emit_opcode( cbuf, 0xF7 );
emit_rm( cbuf, 0x3, 0x4, $src$$reg );
// ADD EDX,ESI
emit_opcode( cbuf, 0x03 );
emit_rm( cbuf, 0x3, HIGH_FROM_LOW($dst$$reg), $tmp$$reg );
emit_rm( cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $tmp$$reg );
%}
enc_class long_multiply_con( eADXRegL dst, immL_127 src, rRegI tmp ) %{
@ -2765,7 +2766,7 @@ encode %{
// hi(result) = hi(src * y_lo) + lo(src * y_hi)
// IMUL $tmp,EDX,$src
emit_opcode( cbuf, 0x6B );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($dst$$reg) );
emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
emit_d8( cbuf, (int)$src$$constant );
// MOV EDX,$src
emit_opcode(cbuf, 0xB8 + EDX_enc);
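
Both multiply encodings lean on the 32×32→64 decomposition spelled out in the "hi(result) = hi(src * y_lo) + lo(src * y_hi)" comment: the low halves produce the one full 64-bit partial product, and only the low 32 bits of the two cross terms can reach the high word. A hedged restatement in plain C++:

```cpp
// Sketch of the decomposition behind the long-multiply encodings above;
// the variable names are illustrative, not the operand names from the .ad file.
#include <cstdint>
#include <cstdio>

static uint64_t mul_long(uint32_t x_lo, uint32_t x_hi, uint32_t y_lo, uint32_t y_hi) {
  uint64_t lo_prod = (uint64_t)x_lo * y_lo;   // full 64-bit product of the low halves
  uint32_t hi = (uint32_t)(lo_prod >> 32)     // hi(x.lo * y.lo)
              + x_lo * y_hi                   // cross term, low 32 bits only
              + x_hi * y_lo;                  // cross term, low 32 bits only
  return ((uint64_t)hi << 32) | (uint32_t)lo_prod;
}

int main() {
  uint64_t x = 0x0000000123456789ULL, y = 0xABULL;
  uint64_t z = mul_long((uint32_t)x, (uint32_t)(x >> 32),
                        (uint32_t)y, (uint32_t)(y >> 32));
  std::printf("%s\n", z == x * y ? "matches" : "mismatch");
}
```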
@ -2780,11 +2781,11 @@ encode %{
enc_class long_div( eRegL src1, eRegL src2 ) %{
// PUSH src1.hi
emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
// PUSH src1.lo
emit_opcode(cbuf, 0x50+$src1$$reg );
// PUSH src2.hi
emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
// PUSH src2.lo
emit_opcode(cbuf, 0x50+$src2$$reg );
// CALL directly to the runtime
@ -2801,11 +2802,11 @@ encode %{
enc_class long_mod( eRegL src1, eRegL src2 ) %{
// PUSH src1.hi
emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src1$$reg) );
emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
// PUSH src1.lo
emit_opcode(cbuf, 0x50+$src1$$reg );
// PUSH src2.hi
emit_opcode(cbuf, HIGH_FROM_LOW(0x50+$src2$$reg) );
emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
// PUSH src2.lo
emit_opcode(cbuf, 0x50+$src2$$reg );
// CALL directly to the runtime
@ -2826,7 +2827,7 @@ encode %{
emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg);
// OR $tmp,$src.hi
emit_opcode(cbuf, 0x0B);
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg));
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class long_cmp_flags1( eRegL src1, eRegL src2 ) %{
@ -2838,7 +2839,7 @@ encode %{
emit_d8(cbuf,2);
// CMP $src1.hi,$src2.hi
emit_opcode( cbuf, 0x3B );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW($src1$$reg), HIGH_FROM_LOW($src2$$reg) );
emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
%}
enc_class long_cmp_flags2( eRegL src1, eRegL src2, rRegI tmp ) %{
@ -2847,10 +2848,10 @@ encode %{
emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
// MOV $tmp,$src1.hi
emit_opcode( cbuf, 0x8B );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src1$$reg) );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src1$$reg) );
// SBB $tmp,$src2.hi\t! Compute flags for long compare
emit_opcode( cbuf, 0x1B );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src2$$reg) );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src2$$reg) );
%}
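
long_cmp_flags2 produces the flags for a signed 64-bit compare without a branch: CMP on the low halves yields the borrow, and SBB folds that borrow into the subtraction of the high halves, whose sign/overflow flags then drive the jump. A sketch of the equivalent computation, returning the "signed less-than" outcome those flags encode:

```cpp
// Sketch of the CMP/SBB flag computation in long_cmp_flags2 above.
#include <cstdint>
#include <cstdio>

static bool signed_less_long(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  bool borrow = a_lo < b_lo;                              // CMP a.lo, b.lo  (sets CF)
  int64_t hi  = (int64_t)a_hi - b_hi - (borrow ? 1 : 0);  // SBB tmp, b.hi
  return hi < 0;                                          // i.e. SF != OF after the SBB
}

int main() {
  // -1 < 0 as 64-bit values, even though the low halves compare the other way.
  std::printf("%d\n", signed_less_long(-1, 0xFFFFFFFFu, 0, 0u));
}
```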
enc_class long_cmp_flags3( eRegL src, rRegI tmp ) %{
@ -2862,17 +2863,17 @@ encode %{
emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg );
// SBB $tmp,$src.hi
emit_opcode( cbuf, 0x1B );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW($src$$reg) );
emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg) );
%}
// Sniff, sniff... smells like Gnu Superoptimizer
enc_class neg_long( eRegL dst ) %{
emit_opcode(cbuf,0xF7); // NEG hi
emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
emit_opcode(cbuf,0xF7); // NEG lo
emit_rm (cbuf,0x3, 0x3, $dst$$reg );
emit_opcode(cbuf,0x83); // SBB hi,0
emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW($dst$$reg));
emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
emit_d8 (cbuf,0 );
%}

View File

@ -140,11 +140,13 @@ public:
}
static AbstractRegSet range(RegImpl start, RegImpl end) {
assert(start <= end, "must be");
int start_enc = start->encoding();
int end_enc = end->encoding();
assert(start_enc <= end_enc, "must be");
uint32_t bits = ~0;
bits <<= start->encoding();
bits <<= 31 - end->encoding();
bits >>= 31 - end->encoding();
bits <<= start_enc;
bits <<= 31 - end_enc;
bits >>= 31 - end_enc;
return AbstractRegSet(bits);
}
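
The change above only hoists the encodings into locals and asserts on those; the mask construction itself is unchanged: start with all 32 bits set, shift away the bits below start_enc, then shift the highest wanted bit up to bit 31 and back down so everything above end_enc falls off. A small standalone version for reference, assuming 0 <= start_enc <= end_enc <= 31:

```cpp
// Sketch of the bitmask built by AbstractRegSet::range above, on plain ints.
#include <cstdint>
#include <cstdio>

static uint32_t range_bits(int start_enc, int end_enc) {
  uint32_t bits = ~0u;
  bits <<= start_enc;       // clear bits below the first register
  bits <<= 31 - end_enc;    // push the last wanted bit up to bit 31...
  bits >>= 31 - end_enc;    // ...and back, clearing everything above it
  return bits;              // bits [start_enc, end_enc] are now set
}

int main() {
  // Encodings 3..5 -> 0b111000.
  std::printf("0x%x\n", range_bits(3, 5));  // prints 0x38
}
```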

View File

@ -2552,9 +2552,8 @@
/* Calling convention constants */ \
/********************************/ \
\
declare_constant(RegisterImpl::number_of_registers) \
declare_constant(ConcreteRegisterImpl::number_of_registers) \
declare_preprocessor_constant("REG_COUNT", REG_COUNT) \
declare_preprocessor_constant("REG_COUNT", REG_COUNT) \
declare_c2_preprocessor_constant("SAVED_ON_ENTRY_REG_COUNT", SAVED_ON_ENTRY_REG_COUNT) \
declare_c2_preprocessor_constant("C_SAVED_ON_ENTRY_REG_COUNT", C_SAVED_ON_ENTRY_REG_COUNT) \
\