8068053: AARCH64: C1 and C2 compilers

Add src/cpu/aarch64/vm/* C1 and C2 files

Reviewed-by: kvn, roland
Andrew Haley 2015-01-20 12:47:43 -08:00
parent 9c458decf5
commit 117205a41f
20 changed files with 20622 additions and 0 deletions

File diff suppressed because it is too large.


@@ -0,0 +1,365 @@
dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
dnl and shift patterns used in aarch64.ad.
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
define(`BASE_SHIFT_INSN',
`
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2,
immI src3, rFlagsReg cr) %{
match(Set dst ($2$1 src1 ($4$1 src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
ins_encode %{
__ $3(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::$5,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}')dnl
define(`BASE_INVERTED_INSN',
`
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2, imm$1_M1 m1,
rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
ins_cost(INSN_COST);
format %{ "$3 $dst, $src1, $src2" %}
ins_encode %{
__ $3(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}')dnl
define(`INVERTED_SHIFT_INSN',
`
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
iReg$1 src1, iReg$1 src2,
immI src3, imm$1_M1 src4, rFlagsReg cr) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
ins_cost(1.9 * INSN_COST);
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
ins_encode %{
__ $3(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::$5,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}')dnl
define(`NOT_INSN',
`instruct reg$1_not_reg(iReg$1NoSp dst,
iReg$1 src1, imm$1_M1 m1,
rFlagsReg cr) %{
match(Set dst (Xor$1 src1 m1));
ins_cost(INSN_COST);
format %{ "$2 $dst, $src1, zr" %}
ins_encode %{
__ $2(as_Register($dst$$reg),
as_Register($src1$$reg),
zr,
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg);
%}')dnl
dnl
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
dnl
define(`ALL_SHIFT_KINDS',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS(Add, add)
ALL_SHIFT_KINDS(Sub, sub)
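dnl
dnl As a worked example (hand expansion; only the generated aarch64.ad
dnl contains it), ALL_SHIFT_KINDS(And, andr) yields six patterns, among
dnl them this AndI-with-LSR form from BASE_SHIFT_INSN(I, And, andw,
dnl URShift, LSR):
dnl
dnl   instruct AndI_reg_URShift_reg(iRegINoSp dst,
dnl                                 iRegI src1, iRegI src2,
dnl                                 immI src3, rFlagsReg cr) %{
dnl     match(Set dst (AndI src1 (URShiftI src2 src3)));
dnl     ins_cost(1.9 * INSN_COST);
dnl     format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
dnl     ins_encode %{
dnl       __ andw(as_Register($dst$$reg),
dnl               as_Register($src1$$reg),
dnl               as_Register($src2$$reg),
dnl               Assembler::LSR,
dnl               $src3$$constant & 0x3f);
dnl     %}
dnl     ins_pipe(ialu_reg_reg_shift);
dnl   %}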
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
define(`BFM_INSN',`
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1 src, immI lshift_count, immI rshift_count)
%{
match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
// Make sure we are not going to exceed what $4 can do.
predicate((unsigned int)n->in(2)->get_int() <= $2
&& (unsigned int)n->in(1)->in(2)->get_int() <= $2);
ins_cost(INSN_COST * 2);
format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int s = $2 - lshift;
int r = (rshift - lshift) & $2;
__ $4(as_Register($dst$$reg),
as_Register($src$$reg),
r, s);
%}
ins_pipe(ialu_reg_shift);
%}')
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
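dnl
dnl Worked example: i2b reaches the matcher as (RShiftI (LShiftI src 24) 24).
dnl With lshift == rshift == 24 the encoding computes r == (24 - 24) & 31 == 0
dnl and s == 31 - 24 == 7, so sbfmwI emits sbfmw(dst, src, 0, 7), which
dnl sign-extends bits 7:0 -- the same effect as sxtb.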
dnl
// Bitfield extract with shift & mask
define(`BFX_INSN',
`instruct $3$1(iReg$1NoSp dst, iReg$1 src, immI rshift, imm$1_bitmask mask)
%{
match(Set dst (And$1 ($2$1 src rshift) mask));
ins_cost(INSN_COST);
format %{ "$3 $dst, $src, $mask" %}
ins_encode %{
int rshift = $rshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ $3(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}')
BFX_INSN(I,URShift,ubfxw)
BFX_INSN(L,URShift,ubfx)
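dnl
dnl Worked example: (AndI (URShiftI src 8) 0xff) gives width ==
dnl exact_log2(0x100) == 8, so ubfxwI emits ubfxw(dst, src, 8, 8),
dnl extracting bits 15:8 in a single instruction.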
// We can use ubfx when extending an And with a mask when we know mask
// is positive. We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
ins_cost(INSN_COST * 2);
format %{ "ubfx $dst, $src, $mask" %}
ins_encode %{
int rshift = $rshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfx(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// Rotations
define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
$rshift$$constant & $2);
%}
ins_pipe(ialu_reg_reg_extr);
%}
')dnl
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
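dnl
dnl Why this works: extr(dst, src1, src2, r) returns bits r+63:r of the
dnl concatenation src1:src2, i.e. (src1 << (64 - r)) | (src2 >> r) for
dnl 0 < r < 64 (extrw likewise, with 32). The predicate forces
dnl lshift + rshift == wordsize, so the Or of the two shifted values is
dnl exactly that funnel shift; the Add form is equivalent because the two
dnl shifted operands occupy disjoint bit ranges. With src1 == src2 this
dnl degenerates to a rotate right by rshift.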
define(`ROL_EXPAND', `
// $2 expander
instruct $2$1_rReg(iReg$1 dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
effect(DEF dst, USE src, USE shift);
format %{ "$2 $dst, $src, $shift" %}
ins_cost(INSN_COST * 3);
ins_encode %{
__ subw(rscratch1, zr, as_Register($shift$$reg));
__ $3(as_Register($dst$$reg), as_Register($src$$reg),
rscratch1);
%}
ins_pipe(ialu_reg_reg_vshift);
%}')dnl
define(`ROR_EXPAND', `
// $2 expander
instruct $2$1_rReg(iReg$1 dst, iReg$1 src, iRegI shift, rFlagsReg cr)
%{
effect(DEF dst, USE src, USE shift);
format %{ "$2 $dst, $src, $shift" %}
ins_cost(INSN_COST);
ins_encode %{
__ $3(as_Register($dst$$reg), as_Register($src$$reg),
as_Register($shift$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}')dnl
define(ROL_INSN, `
instruct $3$1_rReg_Var_C$2(iRegL dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
expand %{
$3L_rReg(dst, src, shift, cr);
%}
%}')dnl
define(ROR_INSN, `
instruct $3$1_rReg_Var_C$2(iRegL dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
%{
match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
expand %{
$3L_rReg(dst, src, shift, cr);
%}
%}')dnl
ROL_EXPAND(L, rol, rorv)
ROL_EXPAND(I, rol, rorvw)
ROL_INSN(L, _64, rol)
ROL_INSN(L, 0, rol)
ROL_INSN(I, _32, rol)
ROL_INSN(I, 0, rol)
ROR_EXPAND(L, ror, rorv)
ROR_EXPAND(I, ror, rorvw)
ROR_INSN(L, _64, ror)
ROR_INSN(L, 0, ror)
ROR_INSN(I, _32, ror)
ROR_INSN(I, 0, ror)
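dnl
dnl The rol expanders rely on the identity rol(x, s) == ror(x, -s mod width):
dnl `subw rscratch1, zr, shift' computes -shift, and rorv/rorvw only consume
dnl the low 6 (resp. 5) bits of the count, so e.g. rolw(x, s) becomes
dnl rorw(x, (32 - s) & 31).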
// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize)
define(`ADD_SUB_CONV', `
instruct $3Ext$1(iReg$2NoSp dst, iReg$2 src1, iReg$1orL2I src2, rFlagsReg cr)
%{
match(Set dst ($3$2 src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "$4 $dst, $src1, $5 $src2" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$5);
%}
ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_CONV(I,L,Add,add,sxtw);
ADD_SUB_CONV(I,L,Sub,sub,sxtw);
dnl
define(`ADD_SUB_EXTENDED', `
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
ins_cost(INSN_COST);
format %{ "$5 $dst, $src1, $6 $src2" %}
ins_encode %{
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$6);
%}
ins_pipe(ialu_reg_reg);
%}')
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
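dnl
dnl Worked example: ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32) matches
dnl (AddI src1 (RShiftI (LShiftI src2 16) 16)) -- the shift pair being the
dnl ideal graph's spelling of a 16-bit sign extension -- and emits a single
dnl add(dst, src1, src2, ext::sxth).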
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
define(`ADD_SUB_ZERO_EXTEND', `
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, imm$1_$2 mask, rFlagsReg cr)
%{
match(Set dst ($3$1 src1 (And$1 src2 mask)));
ins_cost(INSN_COST);
format %{ "$4 $dst, $src1, $src2, $5" %}
ins_encode %{
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::$5);
%}
ins_pipe(ialu_reg_reg);
%}')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
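dnl
dnl Worked example: ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb) matches
dnl (SubL src1 (AndL src2 255)) and emits sub(dst, src1, src2, ext::uxtb),
dnl folding the zero-extending mask into the subtract.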
// END This section of the file is automatically generated. Do not edit --------------


@@ -0,0 +1,98 @@
dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 ad_encode.m4 to generate the load/store
dnl patterns used in aarch64.ad.
dnl
define(choose, `loadStore($1, &MacroAssembler::$3, $2, $4,
$5, $6, $7, $8);dnl
%}')dnl
define(access, `
$3Register $1_reg = as_$3Register($$1$$reg);
$4choose(MacroAssembler(&cbuf), $1_reg,$2,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp)')dnl
define(load,`
enc_class aarch64_enc_$2($1 dst, memory mem) %{dnl
access(dst,$2,$3)')dnl
load(iRegI,ldrsbw)
load(iRegI,ldrsb)
load(iRegI,ldrb)
load(iRegL,ldrb)
load(iRegI,ldrshw)
load(iRegI,ldrsh)
load(iRegI,ldrh)
load(iRegL,ldrh)
load(iRegI,ldrw)
load(iRegL,ldrw)
load(iRegL,ldrsw)
load(iRegL,ldr)
load(vRegF,ldrs,Float)
load(vRegD,ldrd,Float)
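dnl
dnl As a worked example (hand expansion), load(iRegI,ldrsbw) produces:
dnl
dnl   enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
dnl     Register dst_reg = as_Register($dst$$reg);
dnl     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg,
dnl               $mem->opcode(),
dnl               as_Register($mem$$base), $mem$$index, $mem$$scale,
dnl               $mem$$disp);
dnl   %}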
define(STORE,`
enc_class aarch64_enc_$2($1 src, memory mem) %{dnl
access(src,$2,$3,$4)')dnl
define(STORE0,`
enc_class aarch64_enc_$2`'0(memory mem) %{
MacroAssembler _masm(&cbuf);
choose(_masm,zr,$2,$mem->opcode(),
as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp)')dnl
STORE(iRegI,strb)
STORE0(iRegI,strb)
STORE(iRegI,strh)
STORE0(iRegI,strh)
STORE(iRegI,strw)
STORE0(iRegI,strw)
STORE(iRegL,str,,
`// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
}
')
STORE0(iRegL,str)
STORE(vRegF,strs,Float)
STORE(vRegD,strd,Float)
enc_class aarch64_enc_strw_immn(immN src, memory mem) %{
MacroAssembler _masm(&cbuf);
address con = (address)$src$$constant;
// need to do this the hard way until we can manage relocs
// for 32 bit constants
__ movoop(rscratch2, (jobject)con);
if (con) __ encode_heap_oop_not_null(rscratch2);
choose(_masm,rscratch2,strw,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp)
enc_class aarch64_enc_strw_immnk(immN src, memory mem) %{
MacroAssembler _masm(&cbuf);
address con = (address)$src$$constant;
// need to do this the hard way until we can manage relocs
// for 32 bit constants
__ movoop(rscratch2, (jobject)con);
__ encode_klass_not_null(rscratch2);
choose(_masm,rscratch2,strw,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp)


@@ -0,0 +1,391 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
#define __ ce->masm()->
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_method->as_register(), 1);
ce->store_parameter(_bci, 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
if (_index->is_cpu_register()) {
__ mov(rscratch1, _index->as_register());
} else {
__ mov(rscratch1, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
stub_id = Runtime1::throw_range_check_failed_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
}
__ bind(_entry);
__ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
#ifdef ASSERT
__ should_not_reach_here();
#endif
}
// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
_result = result;
_klass = klass;
_klass_reg = klass_reg;
_info = new CodeEmitInfo(info);
assert(stub_id == Runtime1::new_instance_id ||
stub_id == Runtime1::fast_new_instance_id ||
stub_id == Runtime1::fast_new_instance_init_check_id,
"need new_instance id");
_stub_id = stub_id;
}
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
__ mov(r3, _klass_reg->as_register());
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
assert(_length->as_register() == r19, "length must be in r19");
assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_result = result;
_length = length;
_info = new CodeEmitInfo(info);
}
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
assert(_length->as_register() == r19, "length must be in r19");
assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
_info = new CodeEmitInfo(info);
}
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
ce->store_parameter(_obj_reg->as_register(), 1);
ce->store_parameter(_lock_reg->as_register(), 0);
Runtime1::StubID enter_id;
if (ce->compilation()->has_fpu_code()) {
enter_id = Runtime1::monitorenter_id;
} else {
enter_id = Runtime1::monitorenter_nofpu_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_compute_lock) {
// lock_reg was destroyed by fast unlocking attempt => recompute it
ce->monitor_address(_monitor_ix, _lock_reg);
}
ce->store_parameter(_lock_reg->as_register(), 0);
// note: non-blocking leaf routine => no call info needed
Runtime1::StubID exit_id;
if (ce->compilation()->has_fpu_code()) {
exit_id = Runtime1::monitorexit_id;
} else {
exit_id = Runtime1::monitorexit_nofpu_id;
}
__ adr(lr, _continuation);
__ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}
// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
void PatchingStub::align_patch_site(MacroAssembler* masm) {
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(false, "AArch64 should not use C1 runtime patching");
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
ce->add_call_info_here(_info);
DEBUG_ONLY(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
// pass the object in a scratch register because all other registers
// must be preserved
if (_obj->is_cpu_register()) {
__ mov(rscratch1, _obj->as_register());
}
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native-----------------
__ bind(_entry);
// Figure out where the args should go
// This should really convert the IntrinsicID to the Method* and signature
// but I don't know how to do that.
//
VMRegPair args[5];
BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
SharedRuntime::java_calling_convention(signature, args, 5, true);
// push parameters
// (src, src_pos, dest, destPos, length)
Register r[5];
r[0] = src()->as_register();
r[1] = src_pos()->as_register();
r[2] = dst()->as_register();
r[3] = dst_pos()->as_register();
r[4] = length()->as_register();
// next registers will get stored on the stack
for (int i = 0; i < 5 ; i++ ) {
VMReg r_1 = args[i].first();
if (r_1->is_stack()) {
int st_off = r_1->reg2stack() * wordSize;
__ str (r[i], Address(sp, st_off));
} else {
assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
}
}
ce->align_call(lir_static_call);
ce->emit_static_call_stub();
Address resolve(SharedRuntime::get_resolve_static_call_stub(),
relocInfo::static_call_type);
__ trampoline_call(resolve);
ce->add_call_info_here(info());
#ifndef PRODUCT
__ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
__ incrementw(Address(rscratch2));
#endif
__ b(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cbz(pre_val_reg, _continuation);
ce->store_parameter(pre_val()->as_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ b(_continuation);
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->is_a(BarrierSet::G1SATBCTLogging),
"Must be if we're using this.");
return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ cbz(new_val_reg, _continuation);
ce->store_parameter(addr()->as_pointer_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////
#undef __


@@ -0,0 +1,81 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP
#define CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP
// native word offsets from memory address (little endian)
enum {
pd_lo_word_offset_in_bytes = 0,
pd_hi_word_offset_in_bytes = BytesPerWord
};
// explicit rounding operations are required to implement the strictFP mode
enum {
pd_strict_fp_requires_explicit_rounding = false
};
// FIXME: There are no callee-saved
// registers
enum {
pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission
pd_nof_caller_save_cpu_regs_frame_map = 19 - 2, // number of registers killed by calls
pd_nof_caller_save_fpu_regs_frame_map = 32, // number of registers killed by calls
pd_first_callee_saved_reg = 19 - 2,
pd_last_callee_saved_reg = 26 - 2,
pd_last_allocatable_cpu_reg = 16,
pd_nof_cpu_regs_reg_alloc
= pd_last_allocatable_cpu_reg + 1, // number of registers that are visible to register allocator
pd_nof_fpu_regs_reg_alloc = 8, // number of registers that are visible to register allocator
pd_nof_cpu_regs_linearscan = 32, // number of registers visible to linear scan
pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan
pd_nof_xmm_regs_linearscan = 0, // like sparc we don't have any of these
pd_first_cpu_reg = 0,
pd_last_cpu_reg = 16,
pd_first_byte_reg = 0,
pd_last_byte_reg = 16,
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
pd_last_fpu_reg = pd_first_fpu_reg + 31,
pd_first_callee_saved_fpu_reg = 8 + pd_first_fpu_reg,
pd_last_callee_saved_fpu_reg = 15 + pd_first_fpu_reg,
};
// Encoding of float value in debug info. This is true on x86 where
// floats are extended to doubles when stored in the stack, false for
// AArch64 where floats and doubles are stored in their native form.
enum {
pd_float_saved_as_double = false
};
#endif // CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP


@@ -0,0 +1,36 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "utilities/array.hpp"
#include "utilities/ostream.hpp"
//--------------------------------------------------------
// FpuStackSim
//--------------------------------------------------------
// No FPU stack on AARCH64


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_FPUSTACKSIM_HPP
#define CPU_AARCH64_VM_C1_FPUSTACKSIM_HPP
// No FPU stack on AARCH64
class FpuStackSim;
#endif // CPU_AARCH64_VM_C1_FPUSTACKSIM_HPP


@@ -0,0 +1,356 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
LIR_Opr opr = LIR_OprFact::illegalOpr;
VMReg r_1 = reg->first();
VMReg r_2 = reg->second();
if (r_1->is_stack()) {
// Convert stack slot to an SP offset
// The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
// so we must add it in here.
int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, type));
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
Register reg2 = r_2->as_Register();
assert(reg2 == reg, "must be same register");
opr = as_long_opr(reg);
} else if (type == T_OBJECT || type == T_ARRAY) {
opr = as_oop_opr(reg);
} else if (type == T_METADATA) {
opr = as_metadata_opr(reg);
} else {
opr = as_opr(reg);
}
} else if (r_1->is_FloatRegister()) {
assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
int num = r_1->as_FloatRegister()->encoding();
if (type == T_FLOAT) {
opr = LIR_OprFact::single_fpu(num);
} else {
opr = LIR_OprFact::double_fpu(num);
}
} else {
ShouldNotReachHere();
}
return opr;
}
LIR_Opr FrameMap::r0_opr;
LIR_Opr FrameMap::r1_opr;
LIR_Opr FrameMap::r2_opr;
LIR_Opr FrameMap::r3_opr;
LIR_Opr FrameMap::r4_opr;
LIR_Opr FrameMap::r5_opr;
LIR_Opr FrameMap::r6_opr;
LIR_Opr FrameMap::r7_opr;
LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;
LIR_Opr FrameMap::r16_opr;
LIR_Opr FrameMap::r17_opr;
LIR_Opr FrameMap::r18_opr;
LIR_Opr FrameMap::r19_opr;
LIR_Opr FrameMap::r20_opr;
LIR_Opr FrameMap::r21_opr;
LIR_Opr FrameMap::r22_opr;
LIR_Opr FrameMap::r23_opr;
LIR_Opr FrameMap::r24_opr;
LIR_Opr FrameMap::r25_opr;
LIR_Opr FrameMap::r26_opr;
LIR_Opr FrameMap::r27_opr;
LIR_Opr FrameMap::r28_opr;
LIR_Opr FrameMap::r29_opr;
LIR_Opr FrameMap::r30_opr;
LIR_Opr FrameMap::rfp_opr;
LIR_Opr FrameMap::sp_opr;
LIR_Opr FrameMap::receiver_opr;
LIR_Opr FrameMap::r0_oop_opr;
LIR_Opr FrameMap::r1_oop_opr;
LIR_Opr FrameMap::r2_oop_opr;
LIR_Opr FrameMap::r3_oop_opr;
LIR_Opr FrameMap::r4_oop_opr;
LIR_Opr FrameMap::r5_oop_opr;
LIR_Opr FrameMap::r6_oop_opr;
LIR_Opr FrameMap::r7_oop_opr;
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r10_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;
LIR_Opr FrameMap::r15_oop_opr;
LIR_Opr FrameMap::r16_oop_opr;
LIR_Opr FrameMap::r17_oop_opr;
LIR_Opr FrameMap::r18_oop_opr;
LIR_Opr FrameMap::r19_oop_opr;
LIR_Opr FrameMap::r20_oop_opr;
LIR_Opr FrameMap::r21_oop_opr;
LIR_Opr FrameMap::r22_oop_opr;
LIR_Opr FrameMap::r23_oop_opr;
LIR_Opr FrameMap::r24_oop_opr;
LIR_Opr FrameMap::r25_oop_opr;
LIR_Opr FrameMap::r26_oop_opr;
LIR_Opr FrameMap::r27_oop_opr;
LIR_Opr FrameMap::r28_oop_opr;
LIR_Opr FrameMap::r29_oop_opr;
LIR_Opr FrameMap::r30_oop_opr;
LIR_Opr FrameMap::rscratch1_opr;
LIR_Opr FrameMap::rscratch2_opr;
LIR_Opr FrameMap::rscratch1_long_opr;
LIR_Opr FrameMap::rscratch2_long_opr;
LIR_Opr FrameMap::r0_metadata_opr;
LIR_Opr FrameMap::r1_metadata_opr;
LIR_Opr FrameMap::r2_metadata_opr;
LIR_Opr FrameMap::r3_metadata_opr;
LIR_Opr FrameMap::r4_metadata_opr;
LIR_Opr FrameMap::r5_metadata_opr;
LIR_Opr FrameMap::long0_opr;
LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
//--------------------------------------------------------
// FrameMap
//--------------------------------------------------------
void FrameMap::initialize() {
assert(!_init_done, "once");
int i=0;
map_register(i, r0); r0_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r1); r1_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r2); r2_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r3); r3_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r4); r4_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r5); r5_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r6); r6_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r7); r7_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r10); r10_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r11); r11_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r12); r12_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r13); r13_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r14); r14_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r15); r15_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r16); r16_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r17); r17_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r18); r18_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r19); r19_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r20); r20_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r21); r21_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r22); r22_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r23); r23_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r24); r24_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r25); r25_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r26); r26_opr = LIR_OprFact::single_cpu(i); i++;
map_register(i, r27); r27_opr = LIR_OprFact::single_cpu(i); i++; // rheapbase
map_register(i, r28); r28_opr = LIR_OprFact::single_cpu(i); i++; // rthread
map_register(i, r29); r29_opr = LIR_OprFact::single_cpu(i); i++; // rfp
map_register(i, r30); r30_opr = LIR_OprFact::single_cpu(i); i++; // lr
map_register(i, r31_sp); sp_opr = LIR_OprFact::single_cpu(i); i++; // sp
map_register(i, r8); r8_opr = LIR_OprFact::single_cpu(i); i++; // rscratch1
map_register(i, r9); r9_opr = LIR_OprFact::single_cpu(i); i++; // rscratch2
rscratch1_opr = r8_opr;
rscratch2_opr = r9_opr;
rscratch1_long_opr = LIR_OprFact::double_cpu(r8_opr->cpu_regnr(), r8_opr->cpu_regnr());
rscratch2_long_opr = LIR_OprFact::double_cpu(r9_opr->cpu_regnr(), r9_opr->cpu_regnr());
long0_opr = LIR_OprFact::double_cpu(0, 0);
long1_opr = LIR_OprFact::double_cpu(1, 1);
fpu0_float_opr = LIR_OprFact::single_fpu(0);
fpu0_double_opr = LIR_OprFact::double_fpu(0);
_caller_save_cpu_regs[0] = r0_opr;
_caller_save_cpu_regs[1] = r1_opr;
_caller_save_cpu_regs[2] = r2_opr;
_caller_save_cpu_regs[3] = r3_opr;
_caller_save_cpu_regs[4] = r4_opr;
_caller_save_cpu_regs[5] = r5_opr;
_caller_save_cpu_regs[6] = r6_opr;
_caller_save_cpu_regs[7] = r7_opr;
// rscratch1, rscratch2 not included
_caller_save_cpu_regs[8] = r10_opr;
_caller_save_cpu_regs[9] = r11_opr;
_caller_save_cpu_regs[10] = r12_opr;
_caller_save_cpu_regs[11] = r13_opr;
_caller_save_cpu_regs[12] = r14_opr;
_caller_save_cpu_regs[13] = r15_opr;
_caller_save_cpu_regs[14] = r16_opr;
_caller_save_cpu_regs[15] = r17_opr;
_caller_save_cpu_regs[16] = r18_opr;
for (int i = 0; i < 8; i++) {
_caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
}
_init_done = true;
r0_oop_opr = as_oop_opr(r0);
r1_oop_opr = as_oop_opr(r1);
r2_oop_opr = as_oop_opr(r2);
r3_oop_opr = as_oop_opr(r3);
r4_oop_opr = as_oop_opr(r4);
r5_oop_opr = as_oop_opr(r5);
r6_oop_opr = as_oop_opr(r6);
r7_oop_opr = as_oop_opr(r7);
r8_oop_opr = as_oop_opr(r8);
r9_oop_opr = as_oop_opr(r9);
r10_oop_opr = as_oop_opr(r10);
r11_oop_opr = as_oop_opr(r11);
r12_oop_opr = as_oop_opr(r12);
r13_oop_opr = as_oop_opr(r13);
r14_oop_opr = as_oop_opr(r14);
r15_oop_opr = as_oop_opr(r15);
r16_oop_opr = as_oop_opr(r16);
r17_oop_opr = as_oop_opr(r17);
r18_oop_opr = as_oop_opr(r18);
r19_oop_opr = as_oop_opr(r19);
r20_oop_opr = as_oop_opr(r20);
r21_oop_opr = as_oop_opr(r21);
r22_oop_opr = as_oop_opr(r22);
r23_oop_opr = as_oop_opr(r23);
r24_oop_opr = as_oop_opr(r24);
r25_oop_opr = as_oop_opr(r25);
r26_oop_opr = as_oop_opr(r26);
r27_oop_opr = as_oop_opr(r27);
r28_oop_opr = as_oop_opr(r28);
r29_oop_opr = as_oop_opr(r29);
r30_oop_opr = as_oop_opr(r30);
r0_metadata_opr = as_metadata_opr(r0);
r1_metadata_opr = as_metadata_opr(r1);
r2_metadata_opr = as_metadata_opr(r2);
r3_metadata_opr = as_metadata_opr(r3);
r4_metadata_opr = as_metadata_opr(r4);
r5_metadata_opr = as_metadata_opr(r5);
sp_opr = as_pointer_opr(r31_sp);
rfp_opr = as_pointer_opr(rfp);
VMRegPair regs;
BasicType sig_bt = T_OBJECT;
SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
receiver_opr = as_oop_opr(regs.first()->as_Register());
for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
_caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
}
}
Address FrameMap::make_new_address(ByteSize sp_offset) const {
// for rbp, based address use this:
// return Address(rbp, in_bytes(sp_offset) - (framesize() - 2) * 4);
return Address(sp, in_bytes(sp_offset));
}
// ----------------mapping-----------------------
// all mapping is based on rfp addressing, except for simple leaf methods where we access
// the locals sp based (and no frame is built)
// Frame for simple leaf methods (quick entries)
//
// +----------+
// | ret addr | <- TOS
// +----------+
// | args |
// | ...... |
// Frame for standard methods
//
// | .........| <- TOS
// | locals |
// +----------+
// | old fp, | <- RFP
// +----------+
// | ret addr |
// +----------+
// | args |
// | .........|
// For OopMaps, map a local variable or spill index to an VMRegImpl name.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register).
//
// framesize +
// stack0 stack0 0 <- VMReg
// | | <registers> |
// ...........|..............|.............|
// 0 1 2 3 x x 4 5 6 ... | <- local indices
// ^ ^ sp() ( x x indicate link
// | | and return addr)
// arguments non-argument locals
VMReg FrameMap::fpu_regname (int n) {
// Return the OptoReg name for the fpu stack slot "n"
// A spilled fpu stack slot corresponds to two single-word OptoReg's.
return as_FloatRegister(n)->as_VMReg();
}
LIR_Opr FrameMap::stack_pointer() {
return FrameMap::sp_opr;
}
// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
// assert(rfp == rbp_mh_SP_save, "must be same register");
return rfp_opr;
}
bool FrameMap::validate_frame() {
return true;
}


@@ -0,0 +1,148 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP
#define CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP
// On AArch64 the frame looks as follows:
//
// +-----------------------------+---------+----------------------------------------+----------------+-----------
// | size_arguments-nof_reg_args | 2 words | size_locals-size_arguments+numreg_args | _size_monitors | spilling .
// +-----------------------------+---------+----------------------------------------+----------------+-----------
public:
static const int pd_c_runtime_reserved_arg_size;
enum {
first_available_sp_in_frame = 0,
frame_pad_in_bytes = 16,
nof_reg_args = 8
};
public:
static LIR_Opr receiver_opr;
static LIR_Opr r0_opr;
static LIR_Opr r1_opr;
static LIR_Opr r2_opr;
static LIR_Opr r3_opr;
static LIR_Opr r4_opr;
static LIR_Opr r5_opr;
static LIR_Opr r6_opr;
static LIR_Opr r7_opr;
static LIR_Opr r8_opr;
static LIR_Opr r9_opr;
static LIR_Opr r10_opr;
static LIR_Opr r11_opr;
static LIR_Opr r12_opr;
static LIR_Opr r13_opr;
static LIR_Opr r14_opr;
static LIR_Opr r15_opr;
static LIR_Opr r16_opr;
static LIR_Opr r17_opr;
static LIR_Opr r18_opr;
static LIR_Opr r19_opr;
static LIR_Opr r20_opr;
static LIR_Opr r21_opr;
static LIR_Opr r22_opr;
static LIR_Opr r23_opr;
static LIR_Opr r24_opr;
static LIR_Opr r25_opr;
static LIR_Opr r26_opr;
static LIR_Opr r27_opr;
static LIR_Opr r28_opr;
static LIR_Opr r29_opr;
static LIR_Opr r30_opr;
static LIR_Opr rfp_opr;
static LIR_Opr sp_opr;
static LIR_Opr r0_oop_opr;
static LIR_Opr r1_oop_opr;
static LIR_Opr r2_oop_opr;
static LIR_Opr r3_oop_opr;
static LIR_Opr r4_oop_opr;
static LIR_Opr r5_oop_opr;
static LIR_Opr r6_oop_opr;
static LIR_Opr r7_oop_opr;
static LIR_Opr r8_oop_opr;
static LIR_Opr r9_oop_opr;
static LIR_Opr r10_oop_opr;
static LIR_Opr r11_oop_opr;
static LIR_Opr r12_oop_opr;
static LIR_Opr r13_oop_opr;
static LIR_Opr r14_oop_opr;
static LIR_Opr r15_oop_opr;
static LIR_Opr r16_oop_opr;
static LIR_Opr r17_oop_opr;
static LIR_Opr r18_oop_opr;
static LIR_Opr r19_oop_opr;
static LIR_Opr r20_oop_opr;
static LIR_Opr r21_oop_opr;
static LIR_Opr r22_oop_opr;
static LIR_Opr r23_oop_opr;
static LIR_Opr r24_oop_opr;
static LIR_Opr r25_oop_opr;
static LIR_Opr r26_oop_opr;
static LIR_Opr r27_oop_opr;
static LIR_Opr r28_oop_opr;
static LIR_Opr r29_oop_opr;
static LIR_Opr r30_oop_opr;
static LIR_Opr rscratch1_opr;
static LIR_Opr rscratch2_opr;
static LIR_Opr rscratch1_long_opr;
static LIR_Opr rscratch2_long_opr;
static LIR_Opr r0_metadata_opr;
static LIR_Opr r1_metadata_opr;
static LIR_Opr r2_metadata_opr;
static LIR_Opr r3_metadata_opr;
static LIR_Opr r4_metadata_opr;
static LIR_Opr r5_metadata_opr;
static LIR_Opr long0_opr;
static LIR_Opr long1_opr;
static LIR_Opr fpu0_float_opr;
static LIR_Opr fpu0_double_opr;
static LIR_Opr as_long_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
}
static LIR_Opr as_pointer_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
}
// VMReg name for spilled physical FPU stack slot n
static VMReg fpu_regname (int n);
static bool is_caller_save_register (LIR_Opr opr) { return true; }
static bool is_caller_save_register (Register r) { return true; }
static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
static int last_cpu_reg() { return pd_last_cpu_reg; }
static int last_byte_reg() { return pd_last_byte_reg; }
#endif // CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP

File diff suppressed because it is too large.


@@ -0,0 +1,78 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
private:
int array_element_size(BasicType type) const;
void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
// helper functions which check for overflow and set bailout if it
// occurs. Each always returns a valid embeddable pointer, but in the
// bailout case the pointer won't be to unique storage.
address float_constant(float f);
address double_constant(double d);
address int_constant(jlong n);
bool is_literal_address(LIR_Address* addr);
// When we need to use something other than rscratch1 use this
// method.
Address as_Address(LIR_Address* addr, Register tmp);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
void add_debug_info_for_branch(address adr, CodeEmitInfo* info);
void casw(Register addr, Register newval, Register cmpval);
void casl(Register addr, Register newval, Register cmpval);
void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL);
static const int max_tableswitches = 20;
struct tableswitch switches[max_tableswitches];
int tableswitch_count;
void init() { tableswitch_count = 0; }
void deoptimize_trap(CodeEmitInfo *info);
public:
void store_parameter(Register r, int offset_from_esp_in_words);
void store_parameter(jint c, int offset_from_esp_in_words);
void store_parameter(jobject c, int offset_from_esp_in_words);
enum { call_stub_size = 12 * NativeInstruction::instruction_size,
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
deopt_handler_size = 7 * NativeInstruction::instruction_size };
#endif // CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP

File diff suppressed because it is too large.


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LinearScan.hpp"
#include "utilities/bitMap.inline.hpp"
void LinearScan::allocate_fpu_stack() {
// No FPU stack on AArch64
}


@@ -0,0 +1,76 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_LINEARSCAN_HPP
#define CPU_AARCH64_VM_C1_LINEARSCAN_HPP
inline bool LinearScan::is_processed_reg_num(int reg_num) {
return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}
inline int LinearScan::num_physical_regs(BasicType type) {
return 1;
}
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
return false;
}
inline bool LinearScan::is_caller_save(int assigned_reg) {
assert(assigned_reg >= 0 && assigned_reg < nof_regs, "should call this only for registers");
if (assigned_reg < pd_first_callee_saved_reg)
return true;
if (assigned_reg > pd_last_callee_saved_reg && assigned_reg < pd_first_callee_saved_fpu_reg)
return true;
if (assigned_reg > pd_last_callee_saved_fpu_reg && assigned_reg < pd_last_fpu_reg)
return true;
return false;
}
inline void LinearScan::pd_add_temps(LIR_Op* op) {
// FIXME ??
}
// Implementation of LinearScanWalker
inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::callee_saved)) {
assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
_first_reg = pd_first_callee_saved_reg;
_last_reg = pd_last_callee_saved_reg;
return true;
} else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT || cur->type() == T_ADDRESS || cur->type() == T_METADATA) {
_first_reg = pd_first_cpu_reg;
_last_reg = pd_last_allocatable_cpu_reg;
return true;
}
return false;
}
#endif // CPU_AARCH64_VM_C1_LINEARSCAN_HPP


@@ -0,0 +1,458 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
                                  FloatRegister f0, FloatRegister f1,
                                  Register result)
{
  Label done;
  if (is_float) {
    fcmps(f0, f1);
  } else {
    fcmpd(f0, f1);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    cset(result, NE);  // Not equal or unordered
    cneg(result, result, LT);  // Less than or unordered
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    cset(result, NE);  // Not equal or unordered
    cneg(result, result, LO);  // Less than
  }
}
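
// A minimal sketch of the value the cset/cneg pairs above compute,
// assuming Java's fcmpl (unordered_result < 0) and fcmpg semantics;
// this helper is illustrative only, not part of the generated code:
//
//   int fcmp(double a, double b, int unordered_result) {
//     if (a < b)  return -1;
//     if (a > b)  return  1;
//     if (a == b) return  0;
//     return unordered_result;  // NaN operand: -1 for fcmpl, +1 for fcmpg
//   }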
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done, fail;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  ldr(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orr(hdr, hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  str(hdr, Address(disp_hdr, 0));
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  lea(rscratch2, Address(obj, hdr_offset));
  cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthrough*/NULL);
  // if the object header was the same, we're done
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) sp <= hdr
  // 3) hdr <= sp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - sp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  mov(rscratch1, sp);
  sub(hdr, hdr, rscratch1);
  ands(hdr, hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  str(hdr, Address(disp_hdr, 0));
  // otherwise we don't care about the result and handle locking via runtime call
  cbnz(hdr, slow_case);
  // done
  bind(done);
  if (PrintBiasedLockingStatistics) {
    lea(rscratch2, ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
    addmw(Address(rscratch2, 0), 1, rscratch1);
  }
  return null_check_offset;
}
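
// To see why the single ands above covers all three tests, consider this
// illustrative fragment (hypothetical values: aligned_mask = 7 and a page
// size of 4096):
//
//   uintptr_t diff = hdr - sp;          // unsigned arithmetic
//   uintptr_t mask = 7 - 4096;          // ...fffffffff007
//   bool recursive = (diff & mask) == 0;
//
// The low three mask bits require hdr to be word aligned (sp is), and the
// high bits require hdr - sp < page_size; if hdr < sp the subtraction wraps
// to a huge unsigned value, so the sp <= hdr test comes for free.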
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  ldr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  // if we had recursive locking, we are done
  cbz(hdr, done);

  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }

  verify_oop(obj);

  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  if (hdr_offset) {
    lea(rscratch1, Address(obj, hdr_offset));
    cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
  } else {
    cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
  }
  // done
  bind(done);
}
// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
  }
}
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    ldr(t1, Address(klass, Klass::prototype_header_offset()));
  } else {
    // This assumes that all prototype bits fit in an int32_t
    mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
  }
  str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    encode_klass_not_null(t1, klass);
    strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
  }

  if (len->is_valid()) {
    strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    store_klass_gap(obj, zr);
  }
}
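
// The header this lays down, on 64-bit with UseCompressedClassPointers
// (the offsets are the usual HotSpot values, shown here for illustration):
//
//   +0   mark word    (8 bytes)  prototype mark, possibly biased
//   +8   narrow klass (4 bytes)  encode_klass_not_null(klass)
//   +12  array length (4 bytes)  if len->is_valid(); otherwise the klass
//                                gap, cleared with store_klass_gap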
// Zero words; len is in bytes
// Destroys all registers except addr
// len must be a nonzero multiple of wordSize
void C1_MacroAssembler::zero_memory(Register addr, Register len, Register t1) {
  assert_different_registers(addr, len, t1, rscratch1, rscratch2);

#ifdef ASSERT
  { Label L;
    tst(len, BytesPerWord - 1);
    br(Assembler::EQ, L);
    stop("len is not a multiple of BytesPerWord");
    bind(L);
  }
#endif

#ifndef PRODUCT
  block_comment("zero memory");
#endif

  Label loop;
  Label entry;

  // Algorithm:
  //
  //   scratch1 = cnt & 7;
  //   cnt -= scratch1;
  //   p += scratch1;
  //   switch (scratch1) {
  //     do {
  //       cnt -= 8;
  //         p[-8] = 0;
  //       case 7:
  //         p[-7] = 0;
  //       case 6:
  //         p[-6] = 0;
  //         // ...
  //       case 1:
  //         p[-1] = 0;
  //       case 0:
  //         p += 8;
  //     } while (cnt);
  //   }

  const int unroll = 8; // Number of str(zr) instructions we'll unroll

  lsr(len, len, LogBytesPerWord);
  andr(rscratch1, len, unroll - 1);  // tmp1 = cnt % unroll
  sub(len, len, rscratch1);          // cnt -= tmp1; len is now a multiple of unroll
  // t1 always points to the end of the region we're about to zero
  add(t1, addr, rscratch1, Assembler::LSL, LogBytesPerWord);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
  br(rscratch2);
  bind(loop);
  sub(len, len, unroll);
  for (int i = -unroll; i < 0; i++)
    str(zr, Address(t1, i * wordSize));
  bind(entry);
  add(t1, t1, unroll * wordSize);
  cbnz(len, loop);
}
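
// Worked trace of the computed branch, with illustrative numbers: for a
// 19-word region, rscratch1 = 19 & 7 = 3, len becomes 16 and t1 = addr + 24.
// The br targets entry minus 3 * 4 bytes (each str is one 4-byte
// instruction), i.e. the last three stores of the unrolled block, which
// zero addr[0..2]; each subsequent loop pass zeroes eight more words, so
// exactly 19 words are cleared.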
// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  // index is positive and ptr sized
  subs(index, index, hdr_size_in_bytes);
  br(Assembler::EQ, done);
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  { Label L;
    tst(index, BytesPerWord - 1);
    br(Assembler::EQ, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif

  // Preserve obj
  if (hdr_size_in_bytes)
    add(obj, obj, hdr_size_in_bytes);
  zero_memory(obj, index, t1);
  if (hdr_size_in_bytes)
    sub(obj, obj, hdr_size_in_bytes);

  // done
  bind(done);
}
void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
}
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  // clear rest of allocated space
  const Register index = t2;
  const int threshold = 16 * BytesPerWord;   // approximate break even point for code size (see comments below)
  if (var_size_in_bytes != noreg) {
    mov(index, var_size_in_bytes);
    initialize_body(obj, index, hdr_size_in_bytes, t1);
  } else if (con_size_in_bytes <= threshold) {
    // use explicit null stores
    int i = hdr_size_in_bytes;
    if (i < con_size_in_bytes && (con_size_in_bytes % (2 * BytesPerWord))) {
      str(zr, Address(obj, i));
      i += BytesPerWord;
    }
    for (; i < con_size_in_bytes; i += 2 * BytesPerWord)
      stp(zr, zr, Address(obj, i));
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    block_comment("zero memory");
    // use loop to null out the fields
    int words = (con_size_in_bytes - hdr_size_in_bytes) / BytesPerWord;
    mov(index, words / 8);

    const int unroll = 8; // Number of str(zr) instructions we'll unroll
    int remainder = words % unroll;
    lea(rscratch1, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));

    Label entry_point, loop;
    b(entry_point);

    bind(loop);
    sub(index, index, 1);
    for (int i = -unroll; i < 0; i++) {
      if (-i == remainder)
        bind(entry_point);
      str(zr, Address(rscratch1, i * wordSize));
    }
    if (remainder == 0)
      bind(entry_point);
    add(rscratch1, rscratch1, unroll * wordSize);
    cbnz(index, loop);
  }

  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
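
// Rough cost model behind the 16-word threshold above, with illustrative
// numbers: the explicit path emits roughly (con_size_in_bytes -
// hdr_size_in_bytes) / 16 stp instructions, e.g. 7 stps for a 128-byte
// object with a 16-byte header, while the loop path costs about a dozen
// instructions regardless of size, so small objects favour straight-line
// stores and large ones the loop.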
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  mov(rscratch1, (int32_t)max_array_allocation_length);
  cmp(len, rscratch1);
  br(Assembler::HS, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  mov(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  add(arr_size, arr_size, len, ext::uxtw, f);
  andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
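
// The size arithmetic above, spelled out (illustrative C; f is log2 of
// the element size in bytes):
//
//   arr_size = (header_size * BytesPerWord
//               + ((uint64_t)(uint32_t)len << f)   // uxtw zero-extends len
//               + MinObjAlignmentInBytesMask)
//              & ~MinObjAlignmentInBytesMask;
//
// Note that the unsigned HS branch earlier also rejects negative lengths:
// a negative 32-bit len compares as an unsigned value far above
// max_array_allocation_length.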
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");

  cmp_klass(receiver, iCache, rscratch1);
}
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump. For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC. Make it a NOP.
  nop();
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter().
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize + 2 * wordSize);
  if (NotifySimulator) {
    notify(Assembler::method_entry);
  }
}

void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize + 2 * wordSize);
  if (NotifySimulator) {
    notify(Assembler::method_reentry);
  }
}
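
// Presumably the extra 2 * wordSize in build_frame and remove_frame makes
// room for the saved rfp/lr pair at the top of the frame, per the usual
// AArch64 frame link convention (an assumption; the pair is pushed by
// MacroAssembler::build_frame, not here).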
void C1_MacroAssembler::verified_entry() {
}
#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(sp, stack_offset), "oop");
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) {
#ifdef ASSERT
  static int nn;
  if (inv_r0)  mov(r0,  0xDEAD);
  if (inv_r19) mov(r19, 0xDEAD);
  if (inv_r2)  mov(r2,  nn++);
  if (inv_r3)  mov(r3,  0xDEAD);
  if (inv_r4)  mov(r4,  0xDEAD);
  if (inv_r5)  mov(r5,  0xDEAD);
#endif
}
#endif // ifndef PRODUCT


@ -0,0 +1,108 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
  using MacroAssembler::build_frame;

  // C1_MacroAssembler contains high-level macros for C1

 private:
  int _rsp_offset;    // track rsp changes
  // initialization
  void pd_init() { _rsp_offset = 0; }

  void zero_memory(Register addr, Register len, Register t1);

 public:
  void try_allocate(
    Register obj,                // result: pointer to object after successful allocation
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,  // object size in bytes if known at compile time
    Register t1,                 // temp register
    Register t2,                 // temp register
    Label&   slow_case           // continuation point if fast allocation fails
  );

  void initialize_header(Register obj, Register klass, Register len, Register t1, Register t2);
  void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1);

  void float_cmp(bool is_float, int unordered_result,
                 FloatRegister f0, FloatRegister f1,
                 Register result);

  // locking
  // hdr     : must be r0, contents destroyed
  // obj     : must point to the object to lock, contents preserved
  // disp_hdr: must point to the displaced header location, contents preserved
  // scratch : scratch register, contents destroyed
  // returns code offset at which to add null check debug information
  int lock_object(Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
  // unlocking
  // hdr     : contents destroyed
  // obj     : must point to the object to unlock, contents preserved
  // disp_hdr: must be r0 & must point to the displaced header location, contents destroyed
  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
  void initialize_object(
    Register obj,                // result: pointer to object after successful allocation
    Register klass,              // object klass
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,  // object size in bytes if known at compile time
    Register t1,                 // temp register
    Register t2                  // temp register
  );

  // allocation of fixed-size objects
  // (can also be used to allocate fixed-size arrays, by setting
  // hdr_size correctly and storing the array length afterwards)
  // obj        : will contain pointer to allocated object
  // t1, t2     : scratch registers - contents destroyed
  // header_size: size of object header in words
  // object_size: total size of object in words
  // slow_case  : exit to slow case implementation if fast allocation fails
  void allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case);

  enum {
    max_array_allocation_length = 0x00FFFFFF
  };

  // allocation of arrays
  // obj        : will contain pointer to allocated object
  // len        : array length in number of elements
  // t          : scratch register - contents destroyed
  // header_size: size of object header in words
  // f          : element scale factor
  // slow_case  : exit to slow case implementation if fast allocation fails
  void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, int f, Register klass, Label& slow_case);

  int  rsp_offset() const { return _rsp_offset; }
  void set_rsp_offset(int n) { _rsp_offset = n; }

  void invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) PRODUCT_RETURN;
#endif // CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP

File diff suppressed because it is too large


@ -0,0 +1,81 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP
#define CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, UseTLAB, true );
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true );
define_pd_global(bool, TieredCompilation, false);
#ifdef BUILTIN_SIM
// We compile very aggressively with the builtin simulator because
// doing so greatly reduces run times and tests more code.
define_pd_global(intx, CompileThreshold, 150 );
define_pd_global(intx, BackEdgeThreshold, 500);
#else
define_pd_global(intx, CompileThreshold, 1500 );
define_pd_global(intx, BackEdgeThreshold, 100000);
#endif
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, true );
define_pd_global(bool, LIRFillDelaySlots, false);
define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
define_pd_global(bool, TwoOperandLIRForm, false );
define_pd_global(intx, SafepointPollOffset, 0 );
#endif // CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP


@ -0,0 +1,91 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP
#define CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(bool, CICompileOSR, true);
define_pd_global(bool, InlineIntrinsics, true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
#ifdef CC_INTERP
define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FLOATPRESSURE, 64);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, INTPRESSURE, 25);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
define_pd_global(intx, RegisterCostAreaRatio, 16000);
// Peephole and CISC spilling both break the graph, and so makes the
// scheduler sick.
define_pd_global(bool, OptoPeephole, true);
define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
// Heap related flags
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed.
#endif // CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP


@ -0,0 +1,36 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
// processor dependent initialization for aarch64

void Compile::pd_compiler2_init() {
  guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "");
  // QQQ presumably all 64-bit CPUs support this. Seems like the ifdef could
  // simply be left out.
}