Merge
commit 3d15529c03
@ -64,7 +64,7 @@ _jvmciModes = {
_jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']

# TODO: add client once/if it can be built on 64-bit platforms
_jdkJvmVariants = ['server']
_jdkJvmVariants = ['server', 'client']

"""
Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
@ -50,9 +50,9 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
    $(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
    $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
    $(HOTSPOT_TOPDIR)/test/compiler/calls \
    $(HOTSPOT_TOPDIR)/test/compiler/native \
    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
    $(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
    $(HOTSPOT_TOPDIR)/test/compiler/jvmci/jdk.vm.ci.code.test \
    #

# Add conditional directories here when needed.
@ -1942,12 +1942,35 @@ source %{

bool is_CAS(int opcode)
{
  return (opcode == Op_CompareAndSwapI ||
          opcode == Op_CompareAndSwapL ||
          opcode == Op_CompareAndSwapN ||
          opcode == Op_CompareAndSwapP);
  switch(opcode) {
  // We handle these
  case Op_CompareAndSwapI:
  case Op_CompareAndSwapL:
  case Op_CompareAndSwapP:
  case Op_CompareAndSwapN:
  // case Op_CompareAndSwapB:
  // case Op_CompareAndSwapS:
    return true;
  // These are TBD
  case Op_WeakCompareAndSwapB:
  case Op_WeakCompareAndSwapS:
  case Op_WeakCompareAndSwapI:
  case Op_WeakCompareAndSwapL:
  case Op_WeakCompareAndSwapP:
  case Op_WeakCompareAndSwapN:
  case Op_CompareAndExchangeB:
  case Op_CompareAndExchangeS:
  case Op_CompareAndExchangeI:
  case Op_CompareAndExchangeL:
  case Op_CompareAndExchangeP:
  case Op_CompareAndExchangeN:
    return false;
  default:
    return false;
  }
}
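
For orientation, and not itself part of the patch: is_CAS() feeds the membar-elision logic later in this file, so answering false for the new weak and exchange opcodes conservatively keeps their barriers in place. A minimal sketch of that kind of caller, with a hypothetical function name:

    // Hypothetical sketch: a trailing-barrier check that must stay
    // conservative for WeakCompareAndSwap* / CompareAndExchange* nodes.
    bool can_elide_trailing_membar(const Node* node) {
      // is_CAS() returns false for the new opcodes, so their barriers remain.
      return is_CAS(node->Opcode());
    }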


// leading_to_trailing
//
// graph traversal helper which detects the normal case Mem feed from
@ -3330,9 +3353,6 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
const bool Matcher::match_rule_supported(int opcode) {

  switch (opcode) {
  case Op_StrComp:
    if (CompactStrings) return false;
    break;
  default:
    break;
  }
@ -4241,14 +4261,16 @@ encode %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true);
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true);
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}


@ -4260,14 +4282,16 @@ encode %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true);
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true);
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
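
Every strong CAS encoding above now routes through the widened cmpxchg entry point, passing /*weak*/ false and noreg for the result. The noreg argument is what preserves the old clobbering contract; schematically (see the macroAssembler hunk further down):

    // Schematic of the fallback inside the new MacroAssembler::cmpxchg:
    // strong callers that don't want the old value still clobber only
    // rscratch1, exactly as before.
    if (result == noreg)  result = rscratch1;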


@ -5806,6 +5830,7 @@ operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
@ -5927,6 +5952,39 @@ operand iRegP_R10()
  interface(REG_INTER);
%}

// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R2 only
operand iRegL_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R3 only
operand iRegL_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
@ -5983,7 +6041,7 @@ operand iRegI_R3()
%}


// Register R2 only
// Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
@ -6007,6 +6065,33 @@ operand iRegN()
  interface(REG_INTER);
%}

operand iRegN_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegN_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(iRegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
operand iRegNNoSp()
%{
@ -9393,12 +9478,12 @@ instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFl
  ins_pipe(pipe_slow);
%}

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

// standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate

// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
// can't match them

instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
@ -9550,6 +9635,216 @@ instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegN
%}


// ---------------------------------------------------------------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.

// This section is generated from aarch64_ad_cas.m4


instruct compareAndExchangeB(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeS(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeI(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeL(iRegL_R0 res, indirect mem, iRegL_R2 oldval, iRegL_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN(iRegN_R0 res, indirect mem, iRegN_R2 oldval, iRegN_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP(iRegP_R0 res, indirect mem, iRegP_R2 oldval, iRegP_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxtbw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxthw(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
// ---------------------------------------------------------------------
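
The two families differ in what they return: the compareAndExchange* patterns hand back the value witnessed in memory (in a pinned result register), while the weakCompareAndSwap* patterns materialize only a success bit from the EQ flag and may fail spuriously, since the store-exclusive is not retried. C++11 atomics draw the same line and may serve as a mental model; a sketch, not part of the patch:

    #include <atomic>

    // Mental model only. The weak form may fail spuriously and reports
    // success, like weakCompareAndSwap*; on failure 'expected' is refreshed
    // with the witnessed value, which is what compareAndExchange* returns.
    bool weak_cas(std::atomic<long>& cell, long& expected, long desired) {
      return cell.compare_exchange_weak(expected, desired,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
    }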

instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
  match(Set prev (GetAndSetI mem newv));
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
@ -14988,11 +15283,61 @@ instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 c
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register);
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                         iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      fnoreg, fnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                          iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                          iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
%{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
@ -1556,13 +1556,13 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}

void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
@ -808,7 +808,6 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
@ -1002,7 +1001,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {

  LIR_Address* a = new LIR_Address(base_op,
                                   index,
                                   LIR_Address::times_1,
                                   offset,
                                   T_BYTE);
  BasicTypeList signature(3);
hotspot/src/cpu/aarch64/vm/c1_LIR_aarch64.cpp (new file, 54 lines)
@ -0,0 +1,54 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_LIR.hpp"

FloatRegister LIR_OprDesc::as_float_reg() const {
  return as_FloatRegister(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
  return as_FloatRegister(fpu_regnrLo());
}

// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
  assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                             (reg1 << LIR_OprDesc::reg2_shift) |
                             LIR_OprDesc::double_type |
                             LIR_OprDesc::fpu_register |
                             LIR_OprDesc::double_size);
}

#ifndef PRODUCT
void LIR_Address::verify() const {
  assert(base()->is_cpu_register(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
         "wrong type for addresses");
}
#endif // PRODUCT
hotspot/src/cpu/aarch64/vm/cas.m4 (new file, 109 lines)
@ -0,0 +1,109 @@
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.

define(`CAS_INSN',
`
instruct compareAndExchange$1$5(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
  ifelse($5,Acq,' predicate(needs_acquiring_load_exclusive(n));
  ins_cost(VOLATILE_REF_COST);`,' ins_cost(2 * VOLATILE_REF_COST);`)
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN4',
`
instruct compareAndExchange$1$7(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
  ifelse($7,Acq,' predicate(needs_acquiring_load_exclusive(n));
  ins_cost(VOLATILE_REF_COST);`,' ins_cost(2 * VOLATILE_REF_COST);`)
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ $5(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ $6($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}')dnl
CAS_INSN4(B,I,byte,byte,uxtbw,sxtbw)
CAS_INSN4(S,I,short,halfword,uxthw,sxthw)
CAS_INSN(I,I,int,word)
CAS_INSN(L,L,long,xword)
CAS_INSN(N,N,narrow oop,word)
CAS_INSN(P,P,ptr,xword)
dnl
dnl CAS_INSN4(B,I,byte,byte,uxtbw,sxtbw,Acq)
dnl CAS_INSN4(S,I,short,halfword,uxthw,sxthw,Acq)
dnl CAS_INSN(I,I,int,word,Acq)
dnl CAS_INSN(L,L,long,xword,Acq)
dnl CAS_INSN(N,N,narrow oop,word,Acq)
dnl CAS_INSN(P,P,ptr,xword,Acq)
dnl
define(`CAS_INSN2',
`
instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
  ifelse($6,Acq,' predicate(needs_acquiring_load_exclusive(n));
  ins_cost(VOLATILE_REF_COST);`,' ins_cost(2 * VOLATILE_REF_COST);`)
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ uxt$5(rscratch2, $oldval$$Register);
    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
               Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}')dnl
define(`CAS_INSN3',
`
instruct weakCompareAndSwap$1$5(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
  ifelse($5,Acq,' predicate(needs_acquiring_load_exclusive(n));
  ins_cost(VOLATILE_REF_COST);`,' ins_cost(2 * VOLATILE_REF_COST);`)
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}')dnl
CAS_INSN2(B,I,byte,byte,bw)
CAS_INSN2(S,I,short,halfword,hw)
CAS_INSN3(I,I,int,word)
CAS_INSN3(L,L,long,xword)
CAS_INSN3(N,N,narrow oop,word)
CAS_INSN3(P,P,ptr,xword)
dnl CAS_INSN2(B,I,byte,byte,bw,Acq)
dnl CAS_INSN2(S,I,short,halfword,hw,Acq)
dnl CAS_INSN3(I,I,int,word,Acq)
dnl CAS_INSN3(L,L,long,xword,Acq)
dnl CAS_INSN3(N,N,narrow oop,word,Acq)
dnl CAS_INSN3(P,P,ptr,xword,Acq)
dnl
@ -70,11 +70,7 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS

define_pd_global(uintx, TypeProfileLevel, 111);

// No performance work done here yet.
define_pd_global(bool, CompactStrings, false);

// avoid biased locking while we are bootstrapping the aarch64 build
define_pd_global(bool, UseBiasedLocking, false);
define_pd_global(bool, CompactStrings, true);

// Clear short arrays bigger than one word in an arch-specific way
define_pd_global(intx, InitArrayShortSize, BytesPerLong);
@ -118,6 +114,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
// Don't attempt to use Neon on builtin sim until builtin sim supports it
#define UseCRC32 false
#define UseSIMDForMemoryOps false
#define AvoidUnalignedAccesses false

#else
#define UseBuiltinSim false
@ -144,6 +141,8 @@ define_pd_global(intx, InlineSmallCode, 1000);
      "Use CRC32 instructions for CRC32 computation") \
  product(bool, UseSIMDForMemoryOps, false, \
      "Use SIMD instructions in generated memory move code") \
  product(bool, AvoidUnalignedAccesses, false, \
      "Avoid generating unaligned memory accesses") \
  product(bool, UseLSE, false, \
      "Use LSE instructions") \
  product(bool, UseBlockZeroing, true, \
@ -36,6 +36,7 @@
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
@ -565,11 +566,6 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
@ -2145,30 +2141,40 @@ void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Regis
  b(*fail);
}

// A generic CAS; success or failure is in the EQ flag.
// A generic CAS; success or failure is in the EQ flag. A weak CAS
// doesn't retry and may fail spuriously. If the oldval is wanted,
// pass a register for the result, otherwise pass noreg.

// Clobbers rscratch1
void MacroAssembler::cmpxchg(Register addr, Register expected,
                             Register new_val,
                             enum operand_size size,
                             bool acquire, bool release,
                             Register tmp) {
                             bool weak,
                             Register result) {
  if (result == noreg) result = rscratch1;
  if (UseLSE) {
    mov(tmp, expected);
    lse_cas(tmp, new_val, addr, size, acquire, release, /*not_pair*/ true);
    cmp(tmp, expected);
    mov(result, expected);
    lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
    cmp(result, expected);
  } else {
    BLOCK_COMMENT("cmpxchg {");
    Label retry_load, done;
    if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
      prfm(Address(addr), PSTL1STRM);
    bind(retry_load);
    load_exclusive(tmp, addr, size, acquire);
    load_exclusive(result, addr, size, acquire);
    if (size == xword)
      cmp(tmp, expected);
      cmp(result, expected);
    else
      cmpw(tmp, expected);
      cmpw(result, expected);
    br(Assembler::NE, done);
    store_exclusive(tmp, new_val, addr, size, release);
    cbnzw(tmp, retry_load);
    store_exclusive(rscratch1, new_val, addr, size, release);
    if (weak) {
      cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
    } else {
      cbnzw(rscratch1, retry_load);
    }
    bind(done);
    BLOCK_COMMENT("} cmpxchg");
  }
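
In the weak case the retry loop is replaced by a single comparison of the store-exclusive status word, so the caller sees NE either on a value mismatch or on a spurious reservation loss. A hypothetical call against the new signature, for illustration only:

    // Hypothetical illustration: one weak attempt, no retry. EQ afterwards
    // means the store landed; NE means the caller may retry if it wants to.
    __ cmpxchg(addr, expected, new_val, Assembler::xword,
               /*acquire*/ false, /*release*/ true,
               /*weak*/ true, /*result*/ noreg);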
@ -4500,21 +4506,49 @@ void MacroAssembler::string_indexof(Register str2, Register str1,
  BIND(DONE);
}

typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* uxt_insn)(Register Rd, Register Rn);

// Compare strings.
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    Register tmp1) {
                                    Register tmp1,
                                    FloatRegister vtmp, FloatRegister vtmpZ, int ae) {
  Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
        NEXT_WORD, DIFFERENCE;

  bool isLL = ae == StrIntrinsicNode::LL;
  bool isLU = ae == StrIntrinsicNode::LU;
  bool isUL = ae == StrIntrinsicNode::UL;

  bool str1_isL = isLL || isLU;
  bool str2_isL = isLL || isUL;

  int str1_chr_shift = str1_isL ? 0 : 1;
  int str2_chr_shift = str2_isL ? 0 : 1;
  int str1_chr_size = str1_isL ? 1 : 2;
  int str2_chr_size = str2_isL ? 1 : 2;

  chr_insn str1_load_chr = str1_isL ? (chr_insn)&MacroAssembler::ldrb :
                                      (chr_insn)&MacroAssembler::ldrh;
  chr_insn str2_load_chr = str2_isL ? (chr_insn)&MacroAssembler::ldrb :
                                      (chr_insn)&MacroAssembler::ldrh;
  uxt_insn ext_chr = isLL ? (uxt_insn)&MacroAssembler::uxtbw :
                            (uxt_insn)&MacroAssembler::uxthw;

  BLOCK_COMMENT("string_compare {");

  // Bizarrely, the counts are passed in bytes, regardless of whether they
  // are L or U strings, however the result is always in characters.
  if (!str1_isL) asrw(cnt1, cnt1, 1);
  if (!str2_isL) asrw(cnt2, cnt2, 1);

  // Compute the minimum of the string lengths and save the difference.
  subsw(tmp1, cnt1, cnt2);
  cselw(cnt2, cnt1, cnt2, Assembler::LE); // min

  // A very short string
  cmpw(cnt2, 4);
  cmpw(cnt2, isLL ? 8:4);
  br(Assembler::LT, SHORT_STRING);

  // Check if the strings start at the same location.
@ -4523,20 +4557,37 @@ void MacroAssembler::string_compare(Register str1, Register str2,

  // Compare longwords
  {
    subw(cnt2, cnt2, 4); // The last longword is a special case
    subw(cnt2, cnt2, isLL ? 8:4); // The last longword is a special case

    // Move both string pointers to the last longword of their
    // strings, negate the remaining count, and convert it to bytes.
    lea(str1, Address(str1, cnt2, Address::uxtw(1)));
    lea(str2, Address(str2, cnt2, Address::uxtw(1)));
    sub(cnt2, zr, cnt2, LSL, 1);
    lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
    lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
    if (isLU || isUL) {
      sub(cnt1, zr, cnt2, LSL, str1_chr_shift);
      eor(vtmpZ, T16B, vtmpZ, vtmpZ);
    }
    sub(cnt2, zr, cnt2, LSL, str2_chr_shift);

    // Loop, loading longwords and comparing them into rscratch2.
    bind(NEXT_WORD);
    ldr(result, Address(str1, cnt2));
    ldr(cnt1, Address(str2, cnt2));
    adds(cnt2, cnt2, wordSize);
    eor(rscratch2, result, cnt1);
    if (isLU) {
      ldrs(vtmp, Address(str1, cnt1));
      zip1(vtmp, T8B, vtmp, vtmpZ);
      umov(result, vtmp, D, 0);
    } else {
      ldr(result, Address(str1, isUL ? cnt1:cnt2));
    }
    if (isUL) {
      ldrs(vtmp, Address(str2, cnt2));
      zip1(vtmp, T8B, vtmp, vtmpZ);
      umov(rscratch1, vtmp, D, 0);
    } else {
      ldr(rscratch1, Address(str2, cnt2));
    }
    adds(cnt2, cnt2, isUL ? 4:8);
    if (isLU || isUL) add(cnt1, cnt1, isLU ? 4:8);
    eor(rscratch2, result, rscratch1);
    cbnz(rscratch2, DIFFERENCE);
    br(Assembler::LT, NEXT_WORD);

@ -4544,9 +4595,21 @@ void MacroAssembler::string_compare(Register str1, Register str2,
    // same longword twice, but that's still faster than another
    // conditional branch.

    ldr(result, Address(str1));
    ldr(cnt1, Address(str2));
    eor(rscratch2, result, cnt1);
    if (isLU) {
      ldrs(vtmp, Address(str1));
      zip1(vtmp, T8B, vtmp, vtmpZ);
      umov(result, vtmp, D, 0);
    } else {
      ldr(result, Address(str1));
    }
    if (isUL) {
      ldrs(vtmp, Address(str2));
      zip1(vtmp, T8B, vtmp, vtmpZ);
      umov(rscratch1, vtmp, D, 0);
    } else {
      ldr(rscratch1, Address(str2));
    }
    eor(rscratch2, result, rscratch1);
    cbz(rscratch2, LENGTH_DIFF);

    // Find the first different characters in the longwords and
@ -4554,12 +4617,12 @@ void MacroAssembler::string_compare(Register str1, Register str2,
    bind(DIFFERENCE);
    rev(rscratch2, rscratch2);
    clz(rscratch2, rscratch2);
    andr(rscratch2, rscratch2, -16);
    andr(rscratch2, rscratch2, isLL ? -8 : -16);
    lsrv(result, result, rscratch2);
    uxthw(result, result);
    lsrv(cnt1, cnt1, rscratch2);
    uxthw(cnt1, cnt1);
    subw(result, result, cnt1);
    (this->*ext_chr)(result, result);
    lsrv(rscratch1, rscratch1, rscratch2);
    (this->*ext_chr)(rscratch1, rscratch1);
    subw(result, result, rscratch1);
    b(DONE);
  }

@ -4568,8 +4631,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
  cbz(cnt2, LENGTH_DIFF);

  bind(SHORT_LOOP);
  load_unsigned_short(result, Address(post(str1, 2)));
  load_unsigned_short(cnt1, Address(post(str2, 2)));
  (this->*str1_load_chr)(result, Address(post(str1, str1_chr_size)));
  (this->*str2_load_chr)(cnt1, Address(post(str2, str2_chr_size)));
  subw(result, result, cnt1);
  cbnz(result, DONE);
  sub(cnt2, cnt2, 1);
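
In the mixed-encoding (LU/UL) paths above, zip1 interleaves Latin-1 bytes loaded into vtmp with the zeroed vtmpZ, producing their UTF-16 widening so that both strings can be compared as 64-bit chunks of four 16-bit characters. A rough scalar model of that widening step, for illustration only:

    #include <stdint.h>

    // Rough scalar model of the ldrs + zip1 + umov sequence: four Latin-1
    // bytes become four little-endian 16-bit chars with zero high bytes.
    static uint64_t widen_latin1(uint32_t four_bytes) {
      uint64_t out = 0;
      for (int i = 0; i < 4; i++) {
        out |= (uint64_t)((four_bytes >> (8 * i)) & 0xff) << (16 * i);
      }
      return out;
    }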
@ -995,10 +995,11 @@ public:
  }

  // A generic CAS; success or failure is in the EQ flag.
  // Clobbers rscratch1
  void cmpxchg(Register addr, Register expected, Register new_val,
               enum operand_size size,
               bool acquire, bool release,
               Register tmp = rscratch1);
               bool acquire, bool release, bool weak,
               Register result);

  // Calls

@ -1198,7 +1199,8 @@ public:

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1);
                      Register tmp1,
                      FloatRegister vtmp, FloatRegister vtmpZ, int ae);

  void arrays_equals(Register a1, Register a2,
                     Register result, Register cnt1,
@ -29,6 +29,8 @@
#include "register_aarch64.hpp"
# include "interp_masm_aarch64.hpp"

REGISTER_DEFINITION(Register, noreg);

REGISTER_DEFINITION(Register, r0);
REGISTER_DEFINITION(Register, r1);
REGISTER_DEFINITION(Register, r2);
@ -62,6 +64,8 @@ REGISTER_DEFINITION(Register, r29);
REGISTER_DEFINITION(Register, r30);
REGISTER_DEFINITION(Register, sp);

REGISTER_DEFINITION(FloatRegister, fnoreg);

REGISTER_DEFINITION(FloatRegister, v0);
REGISTER_DEFINITION(FloatRegister, v1);
REGISTER_DEFINITION(FloatRegister, v2);
@ -801,6 +801,12 @@ class StubGenerator: public StubCodeGenerator {
    StubCodeMark mark(this, "StubRoutines", stub_name);
    __ align(CodeEntryAlignment);
    __ bind(start);

    Label unaligned_copy_long;
    if (AvoidUnalignedAccesses) {
      __ tbnz(d, 3, unaligned_copy_long);
    }

    if (direction == copy_forwards) {
      __ sub(s, s, bias);
      __ sub(d, d, bias);
@ -901,6 +907,198 @@ class StubGenerator: public StubCodeGenerator {
    }

    __ ret(lr);

    if (AvoidUnalignedAccesses) {
      Label drain, again;
      // Register order for storing. Order is different for backward copy.

      __ bind(unaligned_copy_long);

      // source address is even aligned, target odd aligned
      //
      // when forward copying word pairs we read long pairs at offsets
      // {0, 2, 4, 6} (in long words). when backwards copying we read
      // long pairs at offsets {-2, -4, -6, -8}. We adjust the source
      // address by -2 in the forwards case so we can compute the
      // source offsets for both as {2, 4, 6, 8} * unit where unit = 1
      // or -1.
      //
      // when forward copying we need to store 1 word, 3 pairs and
      // then 1 word at offsets {0, 1, 3, 5, 7}. Rather than use a
      // zero offset we adjust the destination by -1 which means we
      // have to use offsets { 1, 2, 4, 6, 8} * unit for the stores.
      //
      // When backwards copying we need to store 1 word, 3 pairs and
      // then 1 word at offsets {-1, -3, -5, -7, -8} i.e. we use
      // offsets {1, 3, 5, 7, 8} * unit.

      if (direction == copy_forwards) {
        __ sub(s, s, 16);
        __ sub(d, d, 8);
      }

      // Fill 8 registers
      //
      // for forwards copy s was offset by -16 from the original input
      // value of s so the register contents are at these offsets
      // relative to the 64 bit block addressed by that original input
      // and so on for each successive 64 byte block when s is updated
      //
      // t0 at offset 0, t1 at offset 8
      // t2 at offset 16, t3 at offset 24
      // t4 at offset 32, t5 at offset 40
      // t6 at offset 48, t7 at offset 56

      // for backwards copy s was not offset so the register contents
      // are at these offsets into the preceding 64 byte block
      // relative to that original input and so on for each successive
      // preceding 64 byte block when s is updated. this explains the
      // slightly counter-intuitive looking pattern of register usage
      // in the stp instructions for backwards copy.
      //
      // t0 at offset -16, t1 at offset -8
      // t2 at offset -32, t3 at offset -24
      // t4 at offset -48, t5 at offset -40
      // t6 at offset -64, t7 at offset -56

      __ ldp(t0, t1, Address(s, 2 * unit));
      __ ldp(t2, t3, Address(s, 4 * unit));
      __ ldp(t4, t5, Address(s, 6 * unit));
      __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));

      __ subs(count, count, 16);
      __ br(Assembler::LO, drain);

      int prefetch = PrefetchCopyIntervalInBytes;
      bool use_stride = false;
      if (direction == copy_backwards) {
        use_stride = prefetch > 256;
        prefetch = -prefetch;
        if (use_stride) __ mov(stride, prefetch);
      }

      __ bind(again);

      if (PrefetchCopyIntervalInBytes > 0)
        __ prfm(use_stride ? Address(s, stride) : Address(s, prefetch), PLDL1KEEP);

      if (direction == copy_forwards) {
        // allowing for the offset of -8 the store instructions place
        // registers into the target 64 bit block at the following
        // offsets
        //
        // t0 at offset 0
        // t1 at offset 8, t2 at offset 16
        // t3 at offset 24, t4 at offset 32
        // t5 at offset 40, t6 at offset 48
        // t7 at offset 56

        __ str(t0, Address(d, 1 * unit));
        __ stp(t1, t2, Address(d, 2 * unit));
        __ ldp(t0, t1, Address(s, 2 * unit));
        __ stp(t3, t4, Address(d, 4 * unit));
        __ ldp(t2, t3, Address(s, 4 * unit));
        __ stp(t5, t6, Address(d, 6 * unit));
        __ ldp(t4, t5, Address(s, 6 * unit));
        __ str(t7, Address(__ pre(d, 8 * unit)));
        __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
      } else {
        // d was not offset when we started so the registers are
        // written into the 64 bit block preceding d with the following
        // offsets
        //
        // t1 at offset -8
        // t3 at offset -24, t0 at offset -16
        // t5 at offset -48, t2 at offset -32
        // t7 at offset -56, t4 at offset -48
        // t6 at offset -64
        //
        // note that this matches the offsets previously noted for the
        // loads

        __ str(t1, Address(d, 1 * unit));
        __ stp(t3, t0, Address(d, 3 * unit));
        __ ldp(t0, t1, Address(s, 2 * unit));
        __ stp(t5, t2, Address(d, 5 * unit));
        __ ldp(t2, t3, Address(s, 4 * unit));
        __ stp(t7, t4, Address(d, 7 * unit));
        __ ldp(t4, t5, Address(s, 6 * unit));
        __ str(t6, Address(__ pre(d, 8 * unit)));
        __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
      }

      __ subs(count, count, 8);
      __ br(Assembler::HS, again);

      // Drain
      //
      // this uses the same pattern of offsets and register arguments
      // as above
      __ bind(drain);
      if (direction == copy_forwards) {
        __ str(t0, Address(d, 1 * unit));
        __ stp(t1, t2, Address(d, 2 * unit));
        __ stp(t3, t4, Address(d, 4 * unit));
        __ stp(t5, t6, Address(d, 6 * unit));
        __ str(t7, Address(__ pre(d, 8 * unit)));
      } else {
        __ str(t1, Address(d, 1 * unit));
        __ stp(t3, t0, Address(d, 3 * unit));
        __ stp(t5, t2, Address(d, 5 * unit));
        __ stp(t7, t4, Address(d, 7 * unit));
        __ str(t6, Address(__ pre(d, 8 * unit)));
      }
      // now we need to copy any remaining part block which may
      // include a 4 word block subblock and/or a 2 word subblock.
      // bits 2 and 1 in the count are the tell-tale for whether we
      // have each such subblock
      {
        Label L1, L2;
        __ tbz(count, exact_log2(4), L1);
        // this is the same as above but copying only 4 longs hence
        // with only one intervening stp between the str instructions
        // but note that the offsets and registers still follow the
        // same pattern
        __ ldp(t0, t1, Address(s, 2 * unit));
        __ ldp(t2, t3, Address(__ pre(s, 4 * unit)));
        if (direction == copy_forwards) {
          __ str(t0, Address(d, 1 * unit));
          __ stp(t1, t2, Address(d, 2 * unit));
          __ str(t3, Address(__ pre(d, 4 * unit)));
        } else {
          __ str(t1, Address(d, 1 * unit));
          __ stp(t3, t0, Address(d, 3 * unit));
          __ str(t2, Address(__ pre(d, 4 * unit)));
        }
        __ bind(L1);

        __ tbz(count, 1, L2);
        // this is the same as above but copying only 2 longs hence
        // there is no intervening stp between the str instructions
        // but note that the offset and register patterns are still
        // the same
        __ ldp(t0, t1, Address(__ pre(s, 2 * unit)));
        if (direction == copy_forwards) {
          __ str(t0, Address(d, 1 * unit));
          __ str(t1, Address(__ pre(d, 2 * unit)));
        } else {
          __ str(t1, Address(d, 1 * unit));
          __ str(t0, Address(__ pre(d, 2 * unit)));
        }
        __ bind(L2);

        // for forwards copy we need to re-adjust the offsets we
        // applied so that s and d follow the last words written

        if (direction == copy_forwards) {
          __ add(s, s, 16);
          __ add(d, d, 8);
        }

      }

      __ ret(lr);
    }
  }

  // Small copy: less than 16 bytes.
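
The unaligned main loop above is software-pipelined: each trip stores the eight registers filled on the previous trip while loading the next 64-byte block, and the drain block flushes the final set. Stripped of the offset bookkeeping, the shape is roughly this (an illustration, not the stub):

    #include <string.h>

    // Shape of the pipelined loop only: prime, then overlap the store of
    // the previous block with the load of the next, then drain the last.
    void pipelined_copy(const long* src, long* dst, int blocks) {
      long t[8];
      memcpy(t, src, sizeof t);                      // prime t0..t7
      for (int i = 1; i < blocks; i++) {
        memcpy(dst + 8 * (i - 1), t, sizeof t);      // store previous block
        memcpy(t, src + 8 * i, sizeof t);            // load next block
      }
      memcpy(dst + 8 * (blocks - 1), t, sizeof t);   // drain
    }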
@ -1024,11 +1222,9 @@ class StubGenerator: public StubCodeGenerator {
    // (96 bytes if SIMD because we do 32 bytes per instruction)
    __ bind(copy80);
    if (UseSIMDForMemoryOps) {
      __ ldpq(v0, v1, Address(s, 0));
      __ ldpq(v2, v3, Address(s, 32));
      __ ld4(v0, v1, v2, v3, __ T16B, Address(s, 0));
      __ ldpq(v4, v5, Address(send, -32));
      __ stpq(v0, v1, Address(d, 0));
      __ stpq(v2, v3, Address(d, 32));
      __ st4(v0, v1, v2, v3, __ T16B, Address(d, 0));
      __ stpq(v4, v5, Address(dend, -32));
    } else {
      __ ldp(t0, t1, Address(s, 0));
@ -437,6 +437,21 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
@ -463,22 +478,6 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
    __ bind(L);
  }

  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ dispatch_next(state, step);
  return entry;
}
@ -2434,7 +2434,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
    __ ldrsb(r0, field);
    __ push(ztos);
    // Rewrite bytecode to be faster
    if (!is_static) {
    if (rc == may_rewrite) {
      // use btos rewriting, no truncating to t/f bit is needed for getfield.
      patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
    }
@ -2670,7 +2670,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
    if (!is_static) pop_and_check_object(obj);
    __ andw(r0, r0, 0x1);
    __ strb(r0, field);
    if (!is_static) {
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
@ -175,7 +175,15 @@ void VM_Version::get_processor_features() {
  }

  // Enable vendor specific features
  if (_cpu == CPU_CAVIUM && _variant == 0) _features |= CPU_DMB_ATOMICS;
  if (_cpu == CPU_CAVIUM) {
    if (_variant == 0) _features |= CPU_DMB_ATOMICS;
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
    }
  }
  if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
  if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
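
FLAG_IS_DEFAULT is true only while a flag still carries its built-in default, so these Cavium-specific settings never override an explicit -XX: option from the user; the guarded-default idiom, schematically:

    // Schematic only: a command-line -XX:+/-AvoidUnalignedAccesses wins,
    // because FLAG_IS_DEFAULT is then false and the branch is skipped.
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }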
hotspot/src/cpu/ppc/vm/c1_LIR_ppc.cpp (new file, 64 lines)
@ -0,0 +1,64 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_LIR.hpp"

FloatRegister LIR_OprDesc::as_float_reg() const {
  return as_FloatRegister(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
  return as_FloatRegister(fpu_regnrLo());
}

// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
  assert(!as_FloatRegister(reg2)->is_valid(), "Not used on this platform");
  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                             (reg1 << LIR_OprDesc::reg2_shift) |
                             LIR_OprDesc::double_type |
                             LIR_OprDesc::fpu_register |
                             LIR_OprDesc::double_size);
}

#ifndef PRODUCT
void LIR_Address::verify() const {
  assert(scale() == times_1, "Scaled addressing mode not available on PPC and should not be used");
  assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
  assert(base()->is_cpu_register(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
         "wrong type for addresses");
#else
  assert(base()->is_single_cpu(), "wrong base operand");
  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
         "wrong type for addresses");
#endif
}
#endif // PRODUCT
@ -1,6 +1,6 @@
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -76,7 +76,7 @@ class RegisterImpl;
typedef RegisterImpl* Register;

inline Register as_Register(int encoding) {
  assert(encoding >= 0 && encoding < 32, "bad register encoding");
  assert(encoding >= -1 && encoding < 32, "bad register encoding");
  return (Register)(intptr_t)encoding;
}

@ -91,7 +91,7 @@ class RegisterImpl: public AbstractRegisterImpl {
  inline friend Register as_Register(int encoding);

  // accessors
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
  inline VMReg as_VMReg();
  Register successor() const { return as_Register(encoding() + 1); }

@ -247,7 +247,7 @@ class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;

inline FloatRegister as_FloatRegister(int encoding) {
  assert(encoding >= 0 && encoding < 32, "bad float register encoding");
  assert(encoding >= -1 && encoding < 32, "bad float register encoding");
  return (FloatRegister)(intptr_t)encoding;
}

@ -267,7 +267,7 @@ class FloatRegisterImpl: public AbstractRegisterImpl {
  FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }

  // testers
  bool is_valid() const { return (0 <= value() && value() < number_of_registers); }
  bool is_valid() const { return (0 <= value() && value() < number_of_registers); }

  const char* name() const;
};
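
Relaxing the lower bound of the asserts to -1 makes the "no register" sentinels representable; conceptually (an illustration, not code from the patch):

    // With encoding -1 accepted, the sentinel values round-trip through the
    // conversion helpers; is_valid() still rejects them for real encodings.
    Register      no_reg  = as_Register(-1);        // is_valid() == false
    FloatRegister no_freg = as_FloatRegister(-1);   // is_valid() == false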
@ -881,10 +881,6 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
  BLOCK_COMMENT("} stack_overflow_check_with_compare");
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;
@ -1566,7 +1562,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
    __ unlock_object(R26_monitor, false); // Can also unlock methods.
  }

  // Reset active handles after returning from native.
@ -1609,7 +1605,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
    __ unlock_object(R26_monitor, false); // Can also unlock methods.
  }
  BIND(exception_return_sync_check_already_unlocked);

@ -1668,9 +1668,13 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);
      __ b(Loverflow);
      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      } else {
        __ b(Lforward);
      }
    }

    // If there's no MDO, increment counter in method.
@ -1680,9 +1684,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      if (UseOnStackReplacement) {
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
      } else {
        __ b(Lforward);
      }
    __ bind(Loverflow);

    // Notify point for loop, pass branch bytecode.
|
||||
|
@ -243,7 +243,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
|
||||
}
|
||||
|
||||
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
|
||||
if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) {
|
||||
if (DebugVtables || CountCompiledCalls || VerifyOops) {
|
||||
return 1000;
|
||||
} else {
|
||||
int decode_klass_size = MacroAssembler::instr_size_for_decode_klass_not_null();
|
||||

hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp (new file, 63 lines)
@ -0,0 +1,63 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"

FloatRegister LIR_OprDesc::as_float_reg() const {
return FrameMap::nr2floatreg(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
return FrameMap::nr2floatreg(fpu_regnrHi());
}

LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
assert(as_FloatRegister(reg2) != fnoreg, "Sparc holds double in two regs.");
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg2 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size);
}

#ifndef PRODUCT
void LIR_Address::verify() const {
assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
}
#endif // PRODUCT

@ -77,8 +77,7 @@ int CompiledStaticCall::to_interp_stub_size() {
// This doesn't need to be accurate but it must be larger or equal to
// the real size of the stub.
return (NativeMovConstReg::instruction_size + // sethi/setlo;
NativeJump::instruction_size + // sethi; jmp; nop
(TraceJumps ? 20 * BytesPerInstWord : 0) );
NativeJump::instruction_size); // sethi; jmp; nop
}

// Relocation entries for call stub, compiled java to interpreter.

@ -32,7 +32,7 @@ const int StackAlignmentInBytes = (2*wordSize);

// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;
const bool CCallingConventionRequiresIntsAsLongs = true;

#define SUPPORTS_NATIVE_CX8

@ -33,12 +33,10 @@

int InlineCacheBuffer::ic_stub_code_size() {
#ifdef _LP64
if (TraceJumps) return 600 * wordSize;
return (NativeMovConstReg::instruction_size + // sethi;add
NativeJump::instruction_size + // sethi; jmp; delay slot
(1*BytesPerInstWord) + 1); // flush + 1 extra byte
#else
if (TraceJumps) return 300 * wordSize;
return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
#endif
}

@ -184,72 +184,10 @@ void MacroAssembler::null_check(Register reg, int offset) {

void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
assert_not_delayed();
// This can only be traceable if r1 & r2 are visible after a window save
if (TraceJumps) {
#ifndef PRODUCT
save_frame(0);
verify_thread();
ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
add(O2, O1, O1);

add(r1->after_save(), r2->after_save(), O2);
set((intptr_t)file, O3);
set(line, O4);
Label L;
// get nearby pc, store jmp target
call(L, relocInfo::none); // No relocation for call to pc+0x8
delayed()->st(O2, O1, 0);
bind(L);

// store nearby pc
st(O7, O1, sizeof(intptr_t));
// store file
st(O3, O1, 2*sizeof(intptr_t));
// store line
st(O4, O1, 3*sizeof(intptr_t));
add(O0, 1, O0);
and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
restore();
#endif /* PRODUCT */
}
jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
assert_not_delayed();
// This can only be traceable if r1 is visible after a window save
if (TraceJumps) {
#ifndef PRODUCT
save_frame(0);
verify_thread();
ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
add(O2, O1, O1);

add(r1->after_save(), offset, O2);
set((intptr_t)file, O3);
set(line, O4);
Label L;
// get nearby pc, store jmp target
call(L, relocInfo::none); // No relocation for call to pc+0x8
delayed()->st(O2, O1, 0);
bind(L);

// store nearby pc
st(O7, O1, sizeof(intptr_t));
// store file
st(O3, O1, 2*sizeof(intptr_t));
// store line
st(O4, O1, 3*sizeof(intptr_t));
add(O0, 1, O0);
and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
restore();
#endif /* PRODUCT */
}
jmp(r1, offset);
}

@ -260,44 +198,7 @@ void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Registe
// variable length instruction streams.
patchable_sethi(addrlit, temp);
Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
if (TraceJumps) {
#ifndef PRODUCT
// Must do the add here so relocation can find the remainder of the
// value to be relocated.
add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
save_frame(0);
verify_thread();
ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
add(O2, O1, O1);

set((intptr_t)file, O3);
set(line, O4);
Label L;

// get nearby pc, store jmp target
call(L, relocInfo::none); // No relocation for call to pc+0x8
delayed()->st(a.base()->after_save(), O1, 0);
bind(L);

// store nearby pc
st(O7, O1, sizeof(intptr_t));
// store file
st(O3, O1, 2*sizeof(intptr_t));
// store line
st(O4, O1, 3*sizeof(intptr_t));
add(O0, 1, O0);
and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
restore();
jmpl(a.base(), G0, d);
#else
jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
} else {
jmpl(a.base(), a.disp(), d);
}
jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {

@ -703,8 +703,8 @@ class MacroAssembler : public Assembler {

inline void tst( Register s );

inline void ret( bool trace = TraceJumps );
inline void retl( bool trace = TraceJumps );
inline void ret( bool trace = false );
inline void retl( bool trace = false );

// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.

@ -760,8 +760,7 @@ void NativeJump::verify() {
Register rd = inv_rd(i0);
#ifndef _LP64
if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
(is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
(TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
(is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
rd == inv_rs1(i1))) {
fatal("not a jump_to instruction");

@ -3368,9 +3368,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// setup code generation tools
// Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
// even larger with TraceJumps
int pad = TraceJumps ? 512 : 0;
CodeBuffer buffer("handler_blob", 1600 + pad, 512);
CodeBuffer buffer("handler_blob", 1600, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_words;
OopMapSet *oop_maps = new OopMapSet();
@ -3462,9 +3460,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
// setup code generation tools
// Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
// even larger with TraceJumps
int pad = TraceJumps ? 512 : 0;
CodeBuffer buffer(name, 1600 + pad, 512);
CodeBuffer buffer(name, 1600, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_words;
OopMapSet *oop_maps = new OopMapSet();

@ -501,16 +501,10 @@ class HandlerImpl {
static int emit_deopt_handler(CodeBuffer& cbuf);

static uint size_exception_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( NativeJump::instruction_size ); // sethi;jmp;nop
}

static uint size_deopt_handler() {
if (TraceJumps) {
return (400); // just a guess
}
return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}
};
@ -720,7 +714,7 @@ intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int dis
return offset;
}

static inline jdouble replicate_immI(int con, int count, int width) {
static inline jlong replicate_immI(int con, int count, int width) {
// Load a constant replicated "count" times with width "width"
assert(count*width == 8 && width <= 4, "sanity");
int bit_width = width * 8;
@ -729,17 +723,15 @@ static inline jdouble replicate_immI(int con, int count, int width) {
for (int i = 0; i < count - 1; i++) {
val |= (val << bit_width);
}
jdouble dval = *((jdouble*) &val); // coerce to double type
return dval;
return val;
}

static inline jdouble replicate_immF(float con) {
static inline jlong replicate_immF(float con) {
// Replicate float con 2 times and pack into vector.
int val = *((int*)&con);
jlong lval = val;
lval = (lval << 32) | (lval & 0xFFFFFFFFl);
jdouble dval = *((jdouble*) &lval); // coerce to double type
return dval;
return lval;
}

// Standard Sparc opcode form2 field breakdown
@ -2661,8 +2653,7 @@ encode %{

// Emit stub for static call.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
// Stub does not fit into scratch buffer if TraceJumps is enabled
if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}

@ -1560,13 +1560,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ bind(ok);
}
#endif
if (TraceJumps) {
// Move target to register that is recordable
__ mov(Lscratch, G3_scratch);
__ JMP(G3_scratch, 0);
} else {
__ jmp(Lscratch, 0);
}
__ jmp(Lscratch, 0);
__ delayed()->nop();


@ -1636,7 +1636,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
in_bytes(InvocationCounter::counter_offset()));
Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
Assembler::notZero, &Lforward);
(UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
__ ba_short(Loverflow);
}

@ -1647,7 +1647,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
in_bytes(InvocationCounter::counter_offset()));
Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
Assembler::notZero, &Lforward);
(UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
__ bind(Loverflow);

// notify point for loop, pass branch bytecode

@ -221,7 +221,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {


int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
else {
const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
if (is_vtable_stub) {

@ -761,7 +761,6 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
} else {
a = new LIR_Address(obj.result(),
offset.result(),
LIR_Address::times_1,
0,
as_BasicType(type));
}
@ -1081,7 +1080,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {

LIR_Address* a = new LIR_Address(base_op,
index,
LIR_Address::times_1,
offset,
T_BYTE);
BasicTypeList signature(3);
@ -1157,13 +1155,11 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {

LIR_Address* addr_a = new LIR_Address(result_a,
result_aOffset,
LIR_Address::times_1,
constant_aOffset,
T_BYTE);

LIR_Address* addr_b = new LIR_Address(result_b,
result_bOffset,
LIR_Address::times_1,
constant_bOffset,
T_BYTE);

hotspot/src/cpu/x86/vm/c1_LIR_x86.cpp (new file, 74 lines)
@ -0,0 +1,74 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "asm/register.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"


FloatRegister LIR_OprDesc::as_float_reg() const {
ShouldNotReachHere();
return fnoreg;
}

FloatRegister LIR_OprDesc::as_double_reg() const {
ShouldNotReachHere();
return fnoreg;
}

XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
return FrameMap::nr2xmmreg(xmm_regnr());
}

XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
assert(xmm_regnrLo() == xmm_regnrHi(), "assumed in calculation");
return FrameMap::nr2xmmreg(xmm_regnrLo());
}

// Reg2 unused.
LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg1 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size);
}

#ifndef PRODUCT
void LIR_Address::verify() const {
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
}
#endif // PRODUCT

@ -1060,7 +1060,7 @@ void MacroAssembler::fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm

bind(B1_4);
addq(rsp, 16);

pop(rbx);
}
#else
// The 32 bit code is at most SSE2 compliant

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,6 +50,8 @@ REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
#endif // AMD64

REGISTER_DEFINITION(FloatRegister, fnoreg);

REGISTER_DEFINITION(XMMRegister, xnoreg);
REGISTER_DEFINITION(XMMRegister, xmm0 );
REGISTER_DEFINITION(XMMRegister, xmm1 );

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,6 +124,8 @@ class FloatRegisterImpl: public AbstractRegisterImpl {

};

CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));

// Use XMMRegister as shortcut
class XMMRegisterImpl;
typedef XMMRegisterImpl* XMMRegister;

@ -2131,7 +2131,7 @@ static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
return size+offset_size;
}

static inline jfloat replicate4_imm(int con, int width) {
static inline jint replicate4_imm(int con, int width) {
// Load a constant of "width" (in bytes) and replicate it to fill 32bit.
assert(width == 1 || width == 2, "only byte or short types here");
int bit_width = width * 8;
@ -2141,11 +2141,10 @@ static inline jfloat replicate4_imm(int con, int width) {
val |= (val << bit_width);
bit_width <<= 1;
}
jfloat fval = *((jfloat*) &val); // coerce to float type
return fval;
return val;
}

static inline jdouble replicate8_imm(int con, int width) {
static inline jlong replicate8_imm(int con, int width) {
// Load a constant of "width" (in bytes) and replicate it to fill 64bit.
assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here");
int bit_width = width * 8;
@ -2155,8 +2154,7 @@ static inline jdouble replicate8_imm(int con, int width) {
val |= (val << bit_width);
bit_width <<= 1;
}
jdouble dval = *((jdouble*) &val); // coerce to double type
return dval;
return val;
}

#ifndef PRODUCT

@ -26,17 +26,41 @@ import static jdk.vm.ci.meta.JavaKind.Void;
import static jdk.vm.ci.meta.Value.ILLEGAL;
import static jdk.vm.ci.sparc.SPARC.REGISTER_SAFE_AREA_SIZE;
import static jdk.vm.ci.sparc.SPARC.d0;
import static jdk.vm.ci.sparc.SPARC.d10;
import static jdk.vm.ci.sparc.SPARC.d12;
import static jdk.vm.ci.sparc.SPARC.d14;
import static jdk.vm.ci.sparc.SPARC.d16;
import static jdk.vm.ci.sparc.SPARC.d18;
import static jdk.vm.ci.sparc.SPARC.d2;
import static jdk.vm.ci.sparc.SPARC.d20;
import static jdk.vm.ci.sparc.SPARC.d22;
import static jdk.vm.ci.sparc.SPARC.d24;
import static jdk.vm.ci.sparc.SPARC.d26;
import static jdk.vm.ci.sparc.SPARC.d28;
import static jdk.vm.ci.sparc.SPARC.d30;
import static jdk.vm.ci.sparc.SPARC.d4;
import static jdk.vm.ci.sparc.SPARC.d6;
import static jdk.vm.ci.sparc.SPARC.d8;
import static jdk.vm.ci.sparc.SPARC.f0;
import static jdk.vm.ci.sparc.SPARC.f1;
import static jdk.vm.ci.sparc.SPARC.f11;
import static jdk.vm.ci.sparc.SPARC.f13;
import static jdk.vm.ci.sparc.SPARC.f15;
import static jdk.vm.ci.sparc.SPARC.f17;
import static jdk.vm.ci.sparc.SPARC.f19;
import static jdk.vm.ci.sparc.SPARC.f2;
import static jdk.vm.ci.sparc.SPARC.f21;
import static jdk.vm.ci.sparc.SPARC.f23;
import static jdk.vm.ci.sparc.SPARC.f25;
import static jdk.vm.ci.sparc.SPARC.f27;
import static jdk.vm.ci.sparc.SPARC.f29;
import static jdk.vm.ci.sparc.SPARC.f3;
import static jdk.vm.ci.sparc.SPARC.f31;
import static jdk.vm.ci.sparc.SPARC.f4;
import static jdk.vm.ci.sparc.SPARC.f5;
import static jdk.vm.ci.sparc.SPARC.f6;
import static jdk.vm.ci.sparc.SPARC.f7;
import static jdk.vm.ci.sparc.SPARC.f9;
import static jdk.vm.ci.sparc.SPARC.g0;
import static jdk.vm.ci.sparc.SPARC.g2;
import static jdk.vm.ci.sparc.SPARC.g6;
@ -95,11 +119,6 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {

private final RegisterAttributes[] attributesMap;

/**
* Does native code (C++ code) spill arguments in registers to the parent frame?
*/
private final boolean addNativeRegisterArgumentSlots;

@Override
public RegisterArray getAllocatableRegisters() {
return allocatable;
@ -124,10 +143,18 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final RegisterArray cpuCallerParameterRegisters = new RegisterArray(o0, o1, o2, o3, o4, o5);
private final RegisterArray cpuCalleeParameterRegisters = new RegisterArray(i0, i1, i2, i3, i4, i5);

private final RegisterArray fpuFloatParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
private final RegisterArray fpuDoubleParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
private final RegisterArray fpuFloatJavaParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
private final RegisterArray fpuDoubleJavaParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);

// @formatter:off
private final RegisterArray fpuFloatNativeParameterRegisters = new RegisterArray(
f1, f3, f5, f7, f9, f11, f13, f15,
f17, f19, f21, f23, f25, f27, f29, f31);

private final RegisterArray fpuDoubleNativeParameterRegisters = new RegisterArray(
d0, d2, d4, d6, d8, d10, d12, d14,
d16, d18, d20, d22, d24, d26, d28, d30);

private final RegisterArray callerSaveRegisters;

/**
@ -170,7 +197,6 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
public SPARCHotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable) {
this.target = target;
this.allocatable = allocatable;
this.addNativeRegisterArgumentSlots = false;
HashSet<Register> callerSaveSet = new HashSet<>(target.arch.getAvailableValueRegisters().asList());
for (Register cs : windowSaveRegisters) {
callerSaveSet.remove(cs);
@ -220,7 +246,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
return hotspotType == HotSpotCallingConventionType.JavaCallee ? cpuCalleeParameterRegisters : cpuCallerParameterRegisters;
case Double:
case Float:
return fpuFloatParameterRegisters;
return fpuFloatJavaParameterRegisters;
default:
throw JVMCIError.shouldNotReachHere("Unknown JavaKind " + kind);
}
@ -233,48 +259,77 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
int currentGeneral = 0;
int currentFloating = 0;
int currentStackOffset = 0;
boolean isNative = type == HotSpotCallingConventionType.NativeCall;

for (int i = 0; i < parameterTypes.length; i++) {
final JavaKind kind = parameterTypes[i].getJavaKind().getStackKind();

switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Double:
if (currentFloating < fpuFloatParameterRegisters.size()) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
if (isNative) {
RegisterArray registerSet;
switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
registerSet = generalParameterRegisters;
break;
case Double:
registerSet = fpuDoubleNativeParameterRegisters;
break;
case Float:
registerSet = fpuFloatNativeParameterRegisters;
break;
default:
throw JVMCIError.shouldNotReachHere();
}
if (i < registerSet.size()) {
locations[i] = registerSet.get(i).asValue(valueKindFactory.getValueKind(kind));
currentStackOffset += target.arch.getWordSize();
}
} else {
switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
Register register = fpuDoubleParameterRegisters.get(currentFloating);
currentFloating += 2; // Only every second is a double register
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
if (currentFloating < fpuFloatParameterRegisters.size()) {
Register register = fpuFloatParameterRegisters.get(currentFloating++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
default:
throw JVMCIError.shouldNotReachHere();
break;
case Double:
if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
}
Register register = fpuDoubleJavaParameterRegisters.get(currentFloating);
currentFloating += 2; // Only every second is a double register
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
Register register = fpuFloatJavaParameterRegisters.get(currentFloating++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
default:
throw JVMCIError.shouldNotReachHere();
}
}

if (locations[i] == null) {
ValueKind<?> valueKind = valueKindFactory.getValueKind(kind);
// Stack slot is always aligned to its size in bytes but minimum wordsize
int typeSize = valueKind.getPlatformKind().getSizeInBytes();
if (isNative) {
currentStackOffset += target.arch.getWordSize() - typeSize;
}
currentStackOffset = roundUp(currentStackOffset, typeSize);
int slotOffset = currentStackOffset + REGISTER_SAFE_AREA_SIZE;
locations[i] = StackSlot.get(valueKind, slotOffset, !type.out);
@ -284,15 +339,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {

JavaKind returnKind = returnType == null ? Void : returnType.getJavaKind();
AllocatableValue returnLocation = returnKind == Void ? ILLEGAL : getReturnRegister(returnKind, type).asValue(valueKindFactory.getValueKind(returnKind.getStackKind()));

int outArgSpillArea;
if (type == HotSpotCallingConventionType.NativeCall && addNativeRegisterArgumentSlots) {
// Space for native callee which may spill our outgoing arguments
outArgSpillArea = Math.min(locations.length, generalParameterRegisters.size()) * target.wordSize;
} else {
outArgSpillArea = 0;
}
return new CallingConvention(currentStackOffset + outArgSpillArea, returnLocation, locations);
return new CallingConvention(currentStackOffset, returnLocation, locations);
}

private static int roundUp(int number, int mod) {

@ -32,6 +32,11 @@ import jdk.vm.ci.services.Services;

final class HotSpotJVMCICompilerConfig {

/**
* This factory allows JVMCI initialization to succeed but raises an error if the VM asks JVMCI
* to perform a compilation. This allows the reflective parts of the JVMCI API to be used
* without requiring a compiler implementation to be available.
*/
private static class DummyCompilerFactory extends JVMCICompilerFactory implements JVMCICompiler {

public HotSpotCompilationRequestResult compileMethod(CompilationRequest request) {
@ -67,7 +72,6 @@ final class HotSpotJVMCICompilerConfig {
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (f.getCompilerName().equals(compilerName)) {
Services.exportJVMCITo(f.getClass());
f.onSelection();
factory = f;
}
}
@ -75,8 +79,21 @@ final class HotSpotJVMCICompilerConfig {
throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
}
} else {
factory = new DummyCompilerFactory();
// Auto select a single available compiler
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (factory == null) {
factory = f;
} else {
// Multiple factories seen - cancel auto selection
factory = null;
break;
}
}
if (factory == null) {
factory = new DummyCompilerFactory();
}
}
factory.onSelection();
compilerFactory = factory;
}
return compilerFactory;

@ -109,13 +109,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
}
}

private boolean verifyReadRawObject(Object expected, Constant base, long displacement, boolean compressed) {
if (compressed == runtime.getConfig().useCompressedOops) {
Object obj = asObject(base);
if (obj != null) {
assert expected == UNSAFE.getObject(obj, displacement) : "readUnsafeOop doesn't agree with unsafe.getObject";
}
}
private boolean verifyReadRawObject(Object expected, Constant base, long displacement) {
if (base instanceof HotSpotMetaspaceConstant) {
MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
@ -136,11 +130,11 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
assert !compressed;
displacement += asRawPointer(baseConstant);
ret = UNSAFE.getUncompressedObject(displacement);
assert verifyReadRawObject(ret, baseConstant, initialDisplacement);
} else {
assert runtime.getConfig().useCompressedOops == compressed;
ret = UNSAFE.getObject(base, displacement);
}
assert verifyReadRawObject(ret, baseConstant, initialDisplacement, compressed);
return ret;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,10 +28,12 @@ import static jdk.vm.ci.hotspot.HotSpotResolvedObjectTypeImpl.fromObjectClass;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.MethodHandleAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.Signature;

public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProvider {

@ -51,46 +53,80 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
static final ResolvedJavaMethod lambdaFormCompileToBytecodeMethod;
static final HotSpotResolvedJavaField memberNameVmtargetField;

static final ResolvedJavaType CLASS = fromObjectClass(LazyInitialization.class);

/**
* Search for an instance field with the given name in a class.
*
* @param className name of the class to search in
* @param fieldName name of the field to be searched
* @return resolved java field
* @param fieldType resolved Java type of the field
* @return resolved Java field
* @throws ClassNotFoundException
* @throws NoSuchFieldError
*/
private static ResolvedJavaField findFieldInClass(String className, String fieldName) throws ClassNotFoundException {
private static ResolvedJavaField findFieldInClass(String className, String fieldName, ResolvedJavaType fieldType)
throws ClassNotFoundException {
Class<?> clazz = Class.forName(className);
ResolvedJavaType type = runtime().fromClass(clazz);
ResolvedJavaField[] fields = type.getInstanceFields(false);
for (ResolvedJavaField field : fields) {
if (field.getName().equals(fieldName)) {
if (field.getName().equals(fieldName) && field.getType().equals(fieldType)) {
return field;
}
}
return null;
throw new NoSuchFieldError(fieldType.getName() + " " + className + "." + fieldName);
}

private static ResolvedJavaMethod findMethodInClass(String className, String methodName) throws ClassNotFoundException {
private static ResolvedJavaMethod findMethodInClass(String className, String methodName,
ResolvedJavaType resultType, ResolvedJavaType[] parameterTypes) throws ClassNotFoundException {
Class<?> clazz = Class.forName(className);
HotSpotResolvedObjectTypeImpl type = fromObjectClass(clazz);
ResolvedJavaMethod result = null;
for (ResolvedJavaMethod method : type.getDeclaredMethods()) {
if (method.getName().equals(methodName)) {
assert result == null : "more than one method found: " + className + "." + methodName;
if (method.getName().equals(methodName) && signatureMatches(method, resultType, parameterTypes)) {
result = method;
}
}
assert result != null : "method not found: " + className + "." + methodName;
if (result == null) {
StringBuilder sig = new StringBuilder("(");
for (ResolvedJavaType t : parameterTypes) {
sig.append(t.getName()).append(",");
}
if (sig.length() > 1) {
sig.replace(sig.length() - 1, sig.length(), ")");
} else {
sig.append(')');
}
throw new NoSuchMethodError(resultType.getName() + " " + className + "." + methodName + sig.toString());
}
return result;
}

private static boolean signatureMatches(ResolvedJavaMethod m, ResolvedJavaType resultType,
ResolvedJavaType[] parameterTypes) {
Signature s = m.getSignature();
if (!s.getReturnType(CLASS).equals(resultType)) {
return false;
}
for (int i = 0; i < s.getParameterCount(false); ++i) {
if (!s.getParameterType(i, CLASS).equals(parameterTypes[i])) {
return false;
}
}
return true;
}

static {
try {
methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form");
lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry");
lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode");
memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget");
methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form",
fromObjectClass(Class.forName("java.lang.invoke.LambdaForm")));
lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry",
fromObjectClass(Class.forName("java.lang.invoke.MemberName")));
lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode",
new HotSpotResolvedPrimitiveType(JavaKind.Void), new ResolvedJavaType[]{});
memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget",
new HotSpotResolvedPrimitiveType(JavaKind.Long));
} catch (Throwable ex) {
throw new JVMCIError(ex);
}
@ -134,14 +170,12 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
return null;
}

JavaConstant memberName;
if (forceBytecodeGeneration) {
/* Invoke non-public method: MemberName LambdaForm.compileToBytecode() */
memberName = LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
} else {
/* Load non-public field: MemberName LambdaForm.vmentry */
memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
}
/* Load non-public field: MemberName LambdaForm.vmentry */
JavaConstant memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
return getTargetMethod(memberName);
}

@ -163,3 +197,4 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
return compilerToVM().getResolvedJavaMethod(object, LazyInitialization.memberNameVmtargetField.offset());
}
}

@ -96,15 +96,6 @@ public interface ConstantReflectionProvider {
*/
ResolvedJavaType asJavaType(Constant constant);

/**
* Check if the constant is embeddable in the code.
*
* @param constant the constant to test
*/
default boolean isEmbeddable(Constant constant) {
return true;
}

/**
* Gets access to the internals of {@link MethodHandle}.
*/

@ -30,32 +30,46 @@ package jdk.vm.ci.meta;
public class LineNumberTable {

private final int[] lineNumbers;
private final int[] bci;
private final int[] bcis;

/**
*
* @param lineNumbers an array or source line numbers. This array is now owned by this object
* @param lineNumbers an array of source line numbers. This array is now owned by this object
* and should not be mutated by the caller.
* @param bci an array of bytecode indexes the same length at {@code lineNumbers} whose entries
* @param bcis an array of bytecode indexes the same length at {@code lineNumbers} whose entries
* are sorted in ascending order. This array is now owned by this object and must not
* be mutated by the caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `lineNumbers` and `bcis`")
public LineNumberTable(int[] lineNumbers, int[] bci) {
assert bci.length == lineNumbers.length;
public LineNumberTable(int[] lineNumbers, int[] bcis) {
assert bcis.length == lineNumbers.length;
this.lineNumbers = lineNumbers;
this.bci = bci;
this.bcis = bcis;
}

/**
* Gets a source line number for {@code atBci}.
* Gets a source line number for bytecode index {@code atBci}.
*/
public int getLineNumber(int atBci) {
for (int i = 0; i < this.bci.length - 1; i++) {
if (this.bci[i] <= atBci && atBci < this.bci[i + 1]) {
for (int i = 0; i < this.bcis.length - 1; i++) {
if (this.bcis[i] <= atBci && atBci < this.bcis[i + 1]) {
return lineNumbers[i];
}
}
return lineNumbers[lineNumbers.length - 1];
}

/**
* Gets a copy of the array of line numbers that was passed to this object's constructor.
*/
public int[] getLineNumbers() {
return lineNumbers.clone();
}

/**
* Gets a copy of the array of bytecode indexes that was passed to this object's constructor.
*/
public int[] getBcis() {
return bcis.clone();
}
}

@ -26,6 +26,8 @@ import java.util.ArrayList;
import java.util.List;

/**
* Describes the {@link Local}s for a Java method.
*
* @see "https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.13"
*/
public class LocalVariableTable {
@ -33,6 +35,7 @@ public class LocalVariableTable {
private final Local[] locals;

/**
* Creates an object describing the {@link Local}s for a Java method.
*
* @param locals array of objects describing local variables. This array is now owned by this
* object and must not be mutated by the caller.
@ -42,6 +45,13 @@
this.locals = locals;
}

/**
* Gets a description of a local variable that occupies the bytecode frame slot indexed by
* {@code slot} and is live at the bytecode index {@code bci}
*
* @return a description of the requested local variable or null if no such variable matches
* {@code slot} and {@code bci}
*/
public Local getLocal(int slot, int bci) {
Local result = null;
for (Local local : locals) {
@ -56,6 +66,16 @@
return result;
}

/**
* Gets a copy of the array of {@link Local}s that was passed to this object's constructor.
*/
public Local[] getLocals() {
return locals.clone();
}

/**
* Gets a description of all the local variables live at the bytecode index {@code bci}
*/
public Local[] getLocalsAt(int bci) {
List<Local> result = new ArrayList<>();
for (Local l : locals) {

@ -544,10 +544,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// factor me: setPC
os::Solaris::ucontext_set_pc(uc, stub);

#ifndef PRODUCT
if (TraceJumps) thread->record_jump(stub, NULL, __FILE__, __LINE__);
#endif /* PRODUCT */

return true;
}

@ -337,6 +337,15 @@ class AbstractAssembler : public ResourceObj {
//
// We must remember the code section (insts or stubs) in c1
// so we can reset to the proper section in end_a_const().
address int_constant(jint c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
emit_int32(c);
end_a_const(c1);
}
return ptr;
}
address long_constant(jlong c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));

@ -194,12 +194,7 @@ class Compilation: public StackObj {
const char* bailout_msg() const { return _bailout_msg; }

static int desired_max_code_buffer_size() {
#ifndef PPC32
return (int) NMethodSizeLimit; // default 256K or 512K
#else
// conditional branches on PPC are restricted to 16 bit signed
return MIN2((unsigned int)NMethodSizeLimit,32*K);
#endif
return (int)NMethodSizeLimit; // default 64K
}
static int desired_max_constant_size() {
return desired_max_code_buffer_size() / 10;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,44 +42,6 @@ Register LIR_OprDesc::as_register_hi() const {
return FrameMap::cpu_rnr2reg(cpu_regnrHi());
}

#if defined(X86)

XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
return FrameMap::nr2xmmreg(xmm_regnr());
}

XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
assert(xmm_regnrLo() == xmm_regnrHi(), "assumed in calculation");
return FrameMap::nr2xmmreg(xmm_regnrLo());
}

#endif // X86

#if defined(SPARC) || defined(PPC32)

FloatRegister LIR_OprDesc::as_float_reg() const {
return FrameMap::nr2floatreg(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
return FrameMap::nr2floatreg(fpu_regnrHi());
}

#endif

#if defined(ARM) || defined(AARCH64) || defined(PPC64)

FloatRegister LIR_OprDesc::as_float_reg() const {
return as_FloatRegister(fpu_regnr());
}

FloatRegister LIR_OprDesc::as_double_reg() const {
return as_FloatRegister(fpu_regnrLo());
}

#endif


LIR_Opr LIR_OprFact::illegalOpr = LIR_OprFact::illegal();

LIR_Opr LIR_OprFact::value_type(ValueType* type) {
@ -140,32 +102,6 @@ LIR_Address::Scale LIR_Address::scale(BasicType type) {
return LIR_Address::times_1;
}


#ifndef PRODUCT
void LIR_Address::verify0() const {
#if defined(SPARC) || defined(PPC)
assert(scale() == times_1, "Scaled addressing mode not available on SPARC/PPC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#endif
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
#ifndef AARCH64
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
#else
assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand");
#endif
assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
"wrong type for addresses");
#else
assert(base()->is_single_cpu(), "wrong base operand");
assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
"wrong type for addresses");
#endif
}
#endif


//---------------------------------------------------

char LIR_OprDesc::type_char(BasicType t) {

@ -28,6 +28,7 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_ValueType.hpp"
#include "oops/method.hpp"
#include "utilities/globalDefinitions.hpp"

class BlockBegin;
class BlockList;
@ -438,15 +439,13 @@ class LIR_OprDesc: public CompilationResourceObj {
return as_register();
}

#ifdef X86
XMMRegister as_xmm_float_reg() const;
XMMRegister as_xmm_double_reg() const;
// for compatibility with RInfo
int fpu () const { return lo_reg_half(); }
#endif
#if defined(SPARC) || defined(ARM) || defined(PPC) || defined(AARCH64)
FloatRegister as_float_reg () const;
FloatRegister as_double_reg () const;
#ifdef X86
XMMRegister as_xmm_float_reg () const;
XMMRegister as_xmm_double_reg() const;
// for compatibility with RInfo
int fpu() const { return lo_reg_half(); }
#endif

jint as_jint() const { return as_constant_ptr()->as_jint(); }
@ -534,14 +533,19 @@ class LIR_Address: public LIR_OprPtr {
, _type(type)
, _disp(0) { verify(); }

#if defined(X86) || defined(ARM) || defined(AARCH64)
LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(times_1)
, _type(type)
, _disp(disp) { verify(); }

LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
, _type(type)
, _disp(disp) { verify(); }
#endif // X86 || ARM

LIR_Opr base() const { return _base; }
LIR_Opr index() const { return _index; }
@ -554,13 +558,7 @@ class LIR_Address: public LIR_OprPtr {
virtual BasicType type() const { return _type; }
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;

void verify0() const PRODUCT_RETURN;
#if defined(LIR_ADDRESS_PD_VERIFY) && !defined(PRODUCT)
void pd_verify() const;
void verify() const { pd_verify(); }
#else
void verify() const { verify0(); }
#endif
void verify() const PRODUCT_RETURN;

static Scale scale(BasicType type);
};
@ -605,59 +603,49 @@ class LIR_OprFact: public AllStatic {
LIR_OprDesc::double_size);
}

static LIR_Opr single_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size); }
#if defined(ARM32)
static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
#endif
#ifdef SPARC
static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg2 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size); }
#endif
#if defined(X86) || defined(AARCH64)
static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size); }
static LIR_Opr single_fpu(int reg) {
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size);
}

static LIR_Opr single_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size |
LIR_OprDesc::is_xmm_mask); }
static LIR_Opr double_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size |
LIR_OprDesc::is_xmm_mask); }
// Platform dependant.
static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);

#ifdef __SOFTFP__
static LIR_Opr single_softfp(int reg) {
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::single_size);
}
static LIR_Opr double_softfp(int reg1, int reg2) {
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg2 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::double_size);
}
#endif // __SOFTFP__

#if defined(X86)
static LIR_Opr single_xmm(int reg) {
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size |
LIR_OprDesc::is_xmm_mask);
}
static LIR_Opr double_xmm(int reg) {
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size |
LIR_OprDesc::is_xmm_mask);
}
#endif // X86
#if defined(PPC)
static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
(reg << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size); }
#endif
#ifdef PPC32
static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::single_size); }
static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift) |
(reg1 << LIR_OprDesc::reg2_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::double_size); }
#endif // PPC32

static LIR_Opr virtual_register(int index, BasicType type) {
LIR_Opr res;
@ -1467,37 +1455,15 @@ class LIR_OpConvert: public LIR_Op1 {
|
||||
private:
|
||||
Bytecodes::Code _bytecode;
|
||||
ConversionStub* _stub;
|
||||
#ifdef PPC32
|
||||
LIR_Opr _tmp1;
|
||||
LIR_Opr _tmp2;
|
||||
#endif
|
||||
|
||||
public:
|
||||
LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
|
||||
: LIR_Op1(lir_convert, opr, result)
|
||||
, _stub(stub)
|
||||
#ifdef PPC32
|
||||
, _tmp1(LIR_OprDesc::illegalOpr())
|
||||
, _tmp2(LIR_OprDesc::illegalOpr())
|
||||
#endif
|
||||
, _bytecode(code) {}
|
||||
|
||||
#ifdef PPC32
|
||||
LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
|
||||
,LIR_Opr tmp1, LIR_Opr tmp2)
|
||||
: LIR_Op1(lir_convert, opr, result)
|
||||
, _stub(stub)
|
||||
, _tmp1(tmp1)
|
||||
, _tmp2(tmp2)
|
||||
, _bytecode(code) {}
|
||||
#endif
|
||||
|
||||
Bytecodes::Code bytecode() const { return _bytecode; }
|
||||
ConversionStub* stub() const { return _stub; }
|
||||
#ifdef PPC32
|
||||
LIR_Opr tmp1() const { return _tmp1; }
|
||||
LIR_Opr tmp2() const { return _tmp2; }
|
||||
#endif
|
||||
|
||||
virtual void emit_code(LIR_Assembler* masm);
|
||||
virtual LIR_OpConvert* as_OpConvert() { return this; }
|
||||
@ -2136,9 +2102,6 @@ class LIR_List: public CompilationResourceObj {
|
||||
|
||||
void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); }
|
||||
|
||||
#ifdef PPC32
|
||||
void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
|
||||
#endif
|
||||
void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
|
||||
|
||||
void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and, left, right, dst)); }
|
||||
|
@ -3434,7 +3434,7 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
__ load(counter, result);
__ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
__ store(result, counter);
if (notify) {
if (notify && (!backedge || UseOnStackReplacement)) {
LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
// The bci for info can point to cmp for if's we want the if bci
CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
@ -204,11 +204,13 @@ ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
}

ciEnv::~ciEnv() {
CompilerThread* current_thread = CompilerThread::current();
_factory->remove_symbols();
// Need safepoint to clear the env on the thread. RedefineClasses might
// be reading it.
GUARDED_VM_ENTRY(current_thread->set_env(NULL);)
GUARDED_VM_ENTRY(
CompilerThread* current_thread = CompilerThread::current();
_factory->remove_symbols();
// Need safepoint to clear the env on the thread. RedefineClasses might
// be reading it.
current_thread->set_env(NULL);
)
}

// ------------------------------------------------------------------
@ -490,7 +490,8 @@ class CompileReplay : public StackObj {
int comp_level = parse_int(comp_level_label);
// old version w/o comp_level
if (had_error() && (error_message() == comp_level_label)) {
comp_level = CompLevel_full_optimization;
// use highest available tier
comp_level = TieredCompilation ? TieredStopAtLevel : CompLevel_highest_tier;
}
if (!is_valid_comp_level(comp_level)) {
return;
@ -551,17 +551,6 @@ void CompileBroker::compilation_init(TRAPS) {
} else {
c1_count = JVMCIHostThreads;
}

if (!UseInterpreter || !BackgroundCompilation) {
// Force initialization of JVMCI compiler otherwise JVMCI
// compilations will not block until JVMCI is initialized
ResourceMark rm;
TempNewSymbol getCompiler = SymbolTable::new_symbol("getCompiler", CHECK);
TempNewSymbol sig = SymbolTable::new_symbol("()Ljdk/vm/ci/runtime/JVMCICompiler;", CHECK);
Handle jvmciRuntime = JVMCIRuntime::get_HotSpotJVMCIRuntime(CHECK);
JavaValue result(T_OBJECT);
JavaCalls::call_virtual(&result, jvmciRuntime, HotSpotJVMCIRuntime::klass(), getCompiler, sig, CHECK);
}
}
}
#endif // INCLUDE_JVMCI
@ -273,14 +273,9 @@ class DoNothingClosure: public OopClosure {
static DoNothingClosure do_nothing;

static void add_derived_oop(oop* base, oop* derived) {
#ifndef TIERED
#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
if (UseJVMCICompiler) {
ShouldNotReachHere();
}
#endif
#endif // TIERED
#endif // !defined(TIERED) && !defined(INCLUDE_JVMCI)
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::add(derived, base);
#endif // COMPILER2 || INCLUDE_JVMCI
@ -473,13 +468,8 @@ void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
#ifndef PRODUCT

bool ImmutableOopMap::has_derived_pointer() const {
#ifndef TIERED
#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
COMPILER1_PRESENT(return false);
#if INCLUDE_JVMCI
if (UseJVMCICompiler) {
return false;
}
#endif
#endif // !TIERED
#if defined(COMPILER2) || INCLUDE_JVMCI
OopMapStream oms(this,OopMapValue::derived_oop_value);
@ -101,14 +101,16 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_Double_longBitsToDouble_entry();
address generate_Double_doubleToRawLongBits_entry();
#endif // IA32
// Some platforms don't need registers, other need two. Unused function is
// left unimplemented.
void generate_stack_overflow_check(void);
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch);

void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& continue_entry);

void generate_fixed_frame(bool native_call);
#ifdef SPARC
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch);
void save_native_result(void);
void restore_native_result(void);
#endif // SPARC
@ -119,10 +121,7 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {

#ifdef PPC
void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);

void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);
#endif // PPC

public:
@ -612,6 +612,17 @@ JRT_ENTRY(jint, JVMCIRuntime::test_deoptimize_call_int(JavaThread* thread, int v
return value;
JRT_END

void JVMCIRuntime::force_initialization(TRAPS) {
JVMCIRuntime::initialize_well_known_classes(CHECK);

ResourceMark rm;
TempNewSymbol getCompiler = SymbolTable::new_symbol("getCompiler", CHECK);
TempNewSymbol sig = SymbolTable::new_symbol("()Ljdk/vm/ci/runtime/JVMCICompiler;", CHECK);
Handle jvmciRuntime = JVMCIRuntime::get_HotSpotJVMCIRuntime(CHECK);
JavaValue result(T_OBJECT);
JavaCalls::call_virtual(&result, jvmciRuntime, HotSpotJVMCIRuntime::klass(), getCompiler, sig, CHECK);
}

// private static JVMCIRuntime JVMCI.initializeRuntime()
JVM_ENTRY(jobject, JVM_GetJVMCIRuntime(JNIEnv *env, jclass c))
if (!EnableJVMCI) {

@ -157,6 +157,9 @@ class JVMCIRuntime: public AllStatic {
static void throw_klass_external_name_exception(JavaThread* thread, const char* exception, Klass* klass);
static void throw_class_cast_exception(JavaThread* thread, const char* exception, Klass* caster_klass, Klass* target_klass);

// Forces initialization of the JVMCI runtime.
static void force_initialization(TRAPS);

// Test only function
static int test_deoptimize_call_int(JavaThread* thread, int value);
};

@ -148,6 +148,22 @@ bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
#undef JVMCI_PRODUCT_FLAG_VALUE_CHANGED_CHECK_CODE
#undef JVMCI_EXPERIMENTAL_FLAG_VALUE_CHANGED_CHECK_CODE

#ifndef TIERED
// JVMCI is only usable as a jit compiler if the VM supports tiered compilation.
#define JVMCI_CHECK_TIERED_ONLY_FLAG(FLAG) \
if (!FLAG_IS_DEFAULT(FLAG)) { \
jio_fprintf(defaultStream::error_stream(), "VM option '%s' cannot be set in non-tiered VM\n", #FLAG); \
return false; \
}
JVMCI_CHECK_TIERED_ONLY_FLAG(UseJVMCICompiler)
JVMCI_CHECK_TIERED_ONLY_FLAG(BootstrapJVMCI)
JVMCI_CHECK_TIERED_ONLY_FLAG(PrintBootstrap)
JVMCI_CHECK_TIERED_ONLY_FLAG(JVMCIThreads)
JVMCI_CHECK_TIERED_ONLY_FLAG(JVMCIHostThreads)
JVMCI_CHECK_TIERED_ONLY_FLAG(JVMCICountersExcludeCompiler)
#undef JVMCI_CHECK_TIERED_ONLY_FLAG
#endif

return true;
}
@ -550,7 +550,7 @@
declare_function(os::javaTimeNanos) \
\
declare_function(Deoptimization::fetch_unroll_info) \
COMPILER2_PRESENT(declare_function(Deoptimization::uncommon_trap)) \
declare_function(Deoptimization::uncommon_trap) \
declare_function(Deoptimization::unpack_frames) \
\
declare_function(JVMCIRuntime::new_instance) \
@ -3814,6 +3814,7 @@ bool Compile::Constant::operator==(const Constant& other) {
if (can_be_reused() != other.can_be_reused()) return false;
// For floating point values we compare the bit pattern.
switch (type()) {
case T_INT:
case T_FLOAT: return (_v._value.i == other._v._value.i);
case T_LONG:
case T_DOUBLE: return (_v._value.j == other._v._value.j);
@ -3828,6 +3829,7 @@ bool Compile::Constant::operator==(const Constant& other) {

static int type_to_size_in_bytes(BasicType t) {
switch (t) {
case T_INT: return sizeof(jint );
case T_LONG: return sizeof(jlong );
case T_FLOAT: return sizeof(jfloat );
case T_DOUBLE: return sizeof(jdouble);
@ -3896,6 +3898,7 @@ void Compile::ConstantTable::emit(CodeBuffer& cb) {
Constant con = _constants.at(i);
address constant_addr = NULL;
switch (con.type()) {
case T_INT: constant_addr = _masm.int_constant( con.get_jint() ); break;
case T_LONG: constant_addr = _masm.long_constant( con.get_jlong() ); break;
case T_FLOAT: constant_addr = _masm.float_constant( con.get_jfloat() ); break;
case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;

@ -264,6 +264,7 @@ class Compile : public Phase {

BasicType type() const { return _type; }

jint get_jint() const { return _v._value.i; }
jlong get_jlong() const { return _v._value.j; }
jfloat get_jfloat() const { return _v._value.f; }
jdouble get_jdouble() const { return _v._value.d; }
@ -320,6 +321,14 @@ class Compile : public Phase {
Constant add(MachConstantNode* n, BasicType type, jvalue value);
Constant add(Metadata* metadata);
Constant add(MachConstantNode* n, MachOper* oper);
Constant add(MachConstantNode* n, jint i) {
jvalue value; value.i = i;
return add(n, T_INT, value);
}
Constant add(MachConstantNode* n, jlong j) {
jvalue value; value.j = j;
return add(n, T_LONG, value);
}
Constant add(MachConstantNode* n, jfloat f) {
jvalue value; value.f = f;
return add(n, T_FLOAT, value);
@ -222,7 +222,6 @@ class LibraryCallKit : public GraphKit {
Node* round_double_node(Node* n);
bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
bool inline_math_native(vmIntrinsics::ID id);
bool inline_trig(vmIntrinsics::ID id);
bool inline_math(vmIntrinsics::ID id);
template <typename OverflowOp>
bool inline_math_overflow(Node* arg1, Node* arg2);
@ -1404,18 +1403,20 @@ bool LibraryCallKit::inline_string_copy(bool compress) {
(!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
"Unsupported array types for inline_string_copy");

// Range checks
generate_string_range_check(src, src_offset, length, compress && src_elem == T_BYTE);
generate_string_range_check(dst, dst_offset, length, !compress && dst_elem == T_BYTE);
if (stopped()) {
return true;
// Convert char[] offsets to byte[] offsets
bool convert_src = (compress && src_elem == T_BYTE);
bool convert_dst = (!compress && dst_elem == T_BYTE);
if (convert_src) {
src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
} else if (convert_dst) {
dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
}

// Convert char[] offsets to byte[] offsets
if (compress && src_elem == T_BYTE) {
src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
} else if (!compress && dst_elem == T_BYTE) {
dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
// Range checks
generate_string_range_check(src, src_offset, length, convert_src);
generate_string_range_check(dst, dst_offset, length, convert_dst);
if (stopped()) {
return true;
}

Node* src_start = array_element_address(src, src_offset, src_elem);
@ -1691,94 +1692,6 @@ bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
return true;
}

//------------------------------inline_trig----------------------------------
// Inline sin/cos/tan instructions, if possible. If rounding is required, do
// argument reduction which will turn into a fast/slow diamond.
bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
Node* arg = round_double_node(argument(0));
Node* n = NULL;

n = _gvn.transform(n);

// Rounding required? Check for argument reduction!
if (Matcher::strict_fp_requires_explicit_rounding) {
static const double pi_4 = 0.7853981633974483;
static const double neg_pi_4 = -0.7853981633974483;
// pi/2 in 80-bit extended precision
// static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
// -pi/2 in 80-bit extended precision
// static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
// Cutoff value for using this argument reduction technique
//static const double pi_2_minus_epsilon = 1.564660403643354;
//static const double neg_pi_2_plus_epsilon = -1.564660403643354;

// Pseudocode for sin:
// if (x <= Math.PI / 4.0) {
// if (x >= -Math.PI / 4.0) return fsin(x);
// if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
// } else {
// if (x <= Math.PI / 2.0) return fcos(x - Math.PI / 2.0);
// }
// return StrictMath.sin(x);

// Pseudocode for cos:
// if (x <= Math.PI / 4.0) {
// if (x >= -Math.PI / 4.0) return fcos(x);
// if (x >= -Math.PI / 2.0) return fsin(x + Math.PI / 2.0);
// } else {
// if (x <= Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
// }
// return StrictMath.cos(x);

// Actually, sticking in an 80-bit Intel value into C2 will be tough; it
// requires a special machine instruction to load it. Instead we'll try
// the 'easy' case. If we really need the extra range +/- PI/2 we'll
// probably do the math inside the SIN encoding.

// Make the merge point
RegionNode* r = new RegionNode(3);
Node* phi = new PhiNode(r, Type::DOUBLE);

// Flatten arg so we need only 1 test
Node *abs = _gvn.transform(new AbsDNode(arg));
// Node for PI/4 constant
Node *pi4 = makecon(TypeD::make(pi_4));
// Check PI/4 : abs(arg)
Node *cmp = _gvn.transform(new CmpDNode(pi4,abs));
// Check: If PI/4 < abs(arg) then go slow
Node *bol = _gvn.transform(new BoolNode( cmp, BoolTest::lt ));
// Branch either way
IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
set_control(opt_iff(r,iff));

// Set fast path result
phi->init_req(2, n);

// Slow path - non-blocking leaf call
Node* call = NULL;
switch (id) {
case vmIntrinsics::_dtan:
call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
"Tan", NULL, arg, top());
break;
}
assert(control()->in(0) == call, "");
Node* slow_result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
r->init_req(1, control());
phi->init_req(1, slow_result);

// Post-merge
set_control(_gvn.transform(r));
record_for_igvn(r);
n = _gvn.transform(phi);

C->set_has_split_ifs(true); // Has chance for split-if optimization
}
set_result(n);
return true;
}

//------------------------------runtime_math-----------------------------
bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
@ -2424,6 +2337,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
return false;
}
mismatched = (bt != type);
} else if (alias_type->adr_type() == TypeOopPtr::BOTTOM) {
mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
}

// First guess at the value type.
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,9 +70,9 @@ void IdealLoopTree::record_for_igvn() {
}

//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// Compute loop trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) {
if (!_head->as_Loop()->is_valid_counted_loop()) {
return;
}
@ -94,17 +94,21 @@ void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {

Node* init_n = cl->init_trip();
Node* limit_n = cl->limit();
if (init_n != NULL && init_n->is_Con() &&
limit_n != NULL && limit_n->is_Con()) {
if (init_n != NULL && limit_n != NULL) {
// Use longs to avoid integer overflow.
int stride_con = cl->stride_con();
jlong init_con = cl->init_trip()->get_int();
jlong limit_con = cl->limit()->get_int();
int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
int stride_con = cl->stride_con();
jlong init_con = phase->_igvn.type(init_n)->is_int()->_lo;
jlong limit_con = phase->_igvn.type(limit_n)->is_int()->_hi;
int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
// Set exact trip count.
cl->set_exact_trip_count((uint)trip_count);
if (init_n->is_Con() && limit_n->is_Con()) {
// Set exact trip count.
cl->set_exact_trip_count((uint)trip_count);
} else if (cl->unrolled_count() == 1) {
// Set maximum trip count before unrolling.
cl->set_trip_count((uint)trip_count);
}
}
}
}
@ -1305,7 +1309,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
assert(main_exit->Opcode() == Op_IfFalse, "");
int dd_main_exit = dom_depth(main_exit);

// Step A1: Clone the loop body of main. The clone becomes the vector post-loop.
// Step A1: Clone the loop body of main. The clone becomes the post-loop.
// The main loop pre-header illegally has 2 control users (old & new loops).
clone_loop(loop, old_new, dd_main_exit);
assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
@ -2095,8 +2099,7 @@ int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// the loop is in canonical form to multiversion.
closed_range_checks = 0;

// Check loop body for tests of trip-counter plus loop-invariant vs
// loop-invariant.
// Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
for( uint i = 0; i < loop->_body.size(); i++ ) {
Node *iff = loop->_body[i];
if (iff->Opcode() == Op_If ||
@ -2298,7 +2301,7 @@ void PhaseIdealLoop::has_range_checks(IdealLoopTree *loop) {
// skip this loop if it is already checked
if (cl->has_been_range_checked()) return;

// Now check for existance of range checks
// Now check for existence of range checks
for (uint i = 0; i < loop->_body.size(); i++) {
Node *iff = loop->_body[i];
int iff_opc = iff->Opcode();
@ -2319,7 +2322,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
CountedLoopNode *legacy_cl = legacy_loop->_head->as_CountedLoop();
assert(legacy_cl->is_post_loop(), "");

// Check for existance of range checks using the unique instance to make a guard with
// Check for existence of range checks using the unique instance to make a guard with
Unique_Node_List worklist;
for (uint i = 0; i < legacy_loop->_body.size(); i++) {
Node *iff = legacy_loop->_body[i];
@ -2422,7 +2425,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop
}

//-------------------------poison_rce_post_loop--------------------------------
// Causes the rce'd post loop to be optimized away if multiverioning fails
// Causes the rce'd post loop to be optimized away if multiversioning fails
void PhaseIdealLoop::poison_rce_post_loop(IdealLoopTree *rce_loop) {
CountedLoopNode *rce_cl = rce_loop->_head->as_CountedLoop();
Node* ctrl = rce_cl->in(LoopNode::EntryControl);
@ -2710,8 +2713,8 @@ bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
//=============================================================================
//------------------------------iteration_split_impl---------------------------
bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
// Compute exact loop trip count if possible.
compute_exact_trip_count(phase);
// Compute loop trip count if possible.
compute_trip_count(phase);

// Convert one iteration loop into normal code.
if (policy_do_one_iteration_loop(phase))

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -520,8 +520,8 @@ public:
// Return TRUE if "iff" is a range check.
bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;

// Compute loop exact trip count if possible
void compute_exact_trip_count( PhaseIdealLoop *phase );
// Compute loop trip count if possible
void compute_trip_count(PhaseIdealLoop* phase);

// Compute loop trip count from profile data
void compute_profile_trip_cnt( PhaseIdealLoop *phase );
@ -1596,8 +1596,12 @@ void PhaseMacroExpand::expand_allocate_common(
// All nodes that depended on the InitializeNode for control
// and memory must now depend on the MemBarNode that itself
// depends on the InitializeNode
_igvn.replace_node(init_ctrl, ctrl);
_igvn.replace_node(init_mem, mem);
if (init_ctrl != NULL) {
_igvn.replace_node(init_ctrl, ctrl);
}
if (init_mem != NULL) {
_igvn.replace_node(init_mem, mem);
}
}
}

@ -1713,9 +1713,6 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
}
}
} else if (tp->base() == Type::InstPtr) {
ciEnv* env = C->env();
const TypeInstPtr* tinst = tp->is_instptr();
ciKlass* klass = tinst->klass();
assert( off != Type::OffsetBot ||
// arrays can be cast to Objects
tp->is_oopptr()->klass()->is_java_lang_Object() ||
@ -1723,9 +1720,11 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
C->has_unsafe_access(),
"Field accesses must be precise" );
// For oop loads, we expect the _type to be precise.
// Optimizations for constant objects

// Optimize loads from constant fields.
const TypeInstPtr* tinst = tp->is_instptr();
ciObject* const_oop = tinst->const_oop();
if (const_oop != NULL && const_oop->is_instance()) {
if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());
if (con_type != NULL) {
return con_type;
@ -952,7 +952,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
// Set the initially allocated size
int code_req = initial_code_capacity;
int locs_req = initial_locs_capacity;
int stub_req = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
int stub_req = initial_stub_capacity;
int const_req = initial_const_capacity;

int pad_req = NativeCall::instruction_size;

@ -936,7 +936,7 @@ public:
};

//------------------------------TypeOopPtr-------------------------------------
// Some kind of oop (Java pointer), either klass or instance or array.
// Some kind of oop (Java pointer), either instance or array.
class TypeOopPtr : public TypePtr {
protected:
TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id,

@ -806,9 +806,6 @@ public:
develop(bool, TracePcPatching, false, \
"Trace usage of frame::patch_pc") \
\
develop(bool, TraceJumps, false, \
"Trace assembly jumps in thread ring buffer") \
\
develop(bool, TraceRelocator, false, \
"Trace the bytecode relocator") \
\
@ -388,16 +388,6 @@ class SharedRuntime: AllStatic {
static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
int total_args_passed);

// Compute the new number of arguments in the signature if 32 bit ints
// must be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
// is true.
static int convert_ints_to_longints_argcnt(int in_args_count, BasicType* in_sig_bt);
// Adapt a method's signature if it contains 32 bit integers that must
// be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
// is true.
static void convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
BasicType*& in_sig_bt, VMRegPair*& in_regs);

static size_t trampoline_size();

static void generate_trampoline(MacroAssembler *masm, address destination);

@ -238,9 +238,14 @@ void SimpleThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel l
}

#if INCLUDE_JVMCI
// We can't compile with a JVMCI compiler until the module system is initialized.
if (level == CompLevel_full_optimization && UseJVMCICompiler && !Universe::is_module_initialized()) {
return;
// We can't compile with a JVMCI compiler until the module system is initialized past
// phase 3. The JVMCI API itself isn't available until phase 2 and ServiceLoader isn't
// usable until after phase 3.
if (level == CompLevel_full_optimization && EnableJVMCI && UseJVMCICompiler) {
if (SystemDictionary::java_system_loader() == NULL) {
return;
}
assert(Universe::is_module_initialized(), "must be");
}
#endif
@ -3770,6 +3770,13 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Final system initialization including security manager and system class loader
call_initPhase3(CHECK_JNI_ERR);

#if INCLUDE_JVMCI
if (EnableJVMCI && UseJVMCICompiler && (!UseInterpreter || !BackgroundCompilation)) {
// 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
// compilations via JVMCI will not actually block until JVMCI is initialized.
JVMCIRuntime::force_initialization(CHECK_JNI_ERR);
}
#endif
// cache the system class loader
SystemDictionary::compute_java_system_loader(CHECK_(JNI_ERR));

@ -951,7 +951,7 @@ enum CompLevel {
CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo
CompLevel_full_optimization = 4, // C2, Shark or JVMCI

#if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
#if defined(COMPILER2) || defined(SHARK)
CompLevel_highest_tier = CompLevel_full_optimization, // pure C2 and tiered or JVMCI and tiered
#elif defined(COMPILER1)
CompLevel_highest_tier = CompLevel_simple, // pure C1 or JVMCI
@ -130,8 +130,8 @@ jre = \
# Tests that require the full JRE
#
needs_jre = \
compiler/c2/6852078/Test6852078.java \
compiler/c2/7047069/Test7047069.java \
compiler/c2/Test6852078.java \
compiler/c2/Test7047069.java \
runtime/6294277/SourceDebugExtension.java \
runtime/ClassFile/JsrRewriting.java \
runtime/ClassFile/OomWhileParsingRepeatedJsr.java \
@ -277,16 +277,16 @@ hotspot_fast_compiler_1 = \
compiler/arraycopy/ \
compiler/c1/ \
compiler/c2/ \
-compiler/c2/5091921/Test6850611.java \
-compiler/c2/5091921/Test6890943.java \
-compiler/c2/5091921/Test6905845.java \
-compiler/c2/6340864 \
-compiler/c2/6589834 \
-compiler/c2/6603011 \
-compiler/c2/6912517 \
-compiler/c2/6792161 \
-compiler/c2/7070134 \
-compiler/c2/8004867
-compiler/c2/Test6850611.java \
-compiler/c2/cr6890943/Test6890943.java \
-compiler/c2/Test6905845.java \
-compiler/c2/cr6340864 \
-compiler/c2/cr6589834 \
-compiler/c2/cr8004867
-compiler/c2/stemmer \
-compiler/c2/Test6792161.java \
-compiler/c2/Test6603011.java \
-compiler/c2/Test6912517.java \

hotspot_fast_compiler_2 = \
compiler/classUnloading/ \
@ -303,7 +303,7 @@ hotspot_fast_compiler_2 = \
compiler/integerArithmetic/ \
compiler/interpreter/ \
compiler/jvmci/ \
-compiler/codegen/7184394 \
-compiler/codegen/aes \
-compiler/codecache/stress \
-compiler/gcbarriers/PreserveFPRegistersTest.java

@ -320,13 +320,13 @@ hotspot_fast_compiler_3 = \
compiler/types/ \
compiler/uncommontrap/ \
compiler/unsafe/ \
-compiler/intrinsics/adler32 \
-compiler/intrinsics/bmi \
-compiler/intrinsics/mathexact \
-compiler/intrinsics/multiplytolen \
-compiler/intrinsics/sha \
-compiler/loopopts/7052494 \
-compiler/runtime/6826736
-compiler/intrinsics/bigInteger/TestMultiplyToLen.java \
-compiler/intrinsics/zip/TestAdler32.java \
-compiler/loopopts/Test7052494.java \
-compiler/runtime/Test6826736.java

hotspot_fast_compiler_closed = \
sanity/ExecuteInternalVMTests.java
@ -21,7 +21,10 @@
* questions.
*/

import jdk.test.lib.cli.*;
package compiler.arguments;

import jdk.test.lib.cli.CPUSpecificCommandLineOptionTest;
import jdk.test.lib.cli.CommandLineOptionTest;

/**
* Base class for all X86 bit manipulation related command line options.

@ -21,8 +21,10 @@
* questions.
*/

import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;

import jdk.test.lib.ExitCode;
import jdk.test.lib.cli.CommandLineOptionTest;

/**
* Test on bit manipulation related command line options,

@ -21,8 +21,11 @@
* questions.
*/

import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;

import jdk.test.lib.ExitCode;
import jdk.test.lib.Platform;
import jdk.test.lib.cli.CommandLineOptionTest;

/**
* Test on bit manipulation related command line options,

@ -21,19 +21,22 @@
* questions.
*/

import jdk.test.lib.*;

/*
* @test CheckCheckCICompilerCount
* @bug 8130858
* @bug 8132525
* @summary Check that correct range of values for CICompilerCount are allowed depending on whether tiered is enabled or not
* @library /testlibrary
* @library /testlibrary /
* @modules java.base/jdk.internal.misc
* java.management
* @run main CheckCICompilerCount
* @run driver compiler.arguments.CheckCICompilerCount
*/

package compiler.arguments;

import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.ProcessTools;

public class CheckCICompilerCount {
private static final String[][] NON_TIERED_ARGUMENTS = {
{

@ -21,18 +21,21 @@
* questions.
*/

import jdk.test.lib.*;

/*
* @test CheckCompileThresholdScaling
* @bug 8059604
* @summary "Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)"
* @summary Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* java.management
* @run main CheckCompileThresholdScaling
* @run driver compiler.arguments.CheckCompileThresholdScaling
*/

package compiler.arguments;

import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.ProcessTools;

public class CheckCompileThresholdScaling {

// The flag CompileThresholdScaling scales compilation thresholds

@ -26,19 +26,19 @@
* @bug 8031321
* @summary Verify processing of UseBMI1Instructions option on CPU with
* BMI1 feature support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseBMI1InstructionsOnSupportedCPU
* BMISupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @build compiler.arguments.TestUseBMI1InstructionsOnSupportedCPU
* compiler.arguments.BMISupportedCPUTest
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnSupportedCPU
* -XX:+WhiteBoxAPI
* compiler.arguments.TestUseBMI1InstructionsOnSupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
package compiler.arguments;

public class TestUseBMI1InstructionsOnSupportedCPU
extends BMISupportedCPUTest {

@ -26,20 +26,19 @@
* @bug 8031321
* @summary Verify processing of UseBMI1Instructions option on CPU without
* BMI1 feature support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseBMI1InstructionsOnUnsupportedCPU
* BMIUnsupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @build compiler.arguments.TestUseBMI1InstructionsOnUnsupportedCPU
* compiler.arguments.BMIUnsupportedCPUTest
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI TestUseBMI1InstructionsOnUnsupportedCPU
* -XX:+WhiteBoxAPI
* compiler.arguments.TestUseBMI1InstructionsOnUnsupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;

public class TestUseBMI1InstructionsOnUnsupportedCPU
extends BMIUnsupportedCPUTest {

@ -25,10 +25,13 @@
* @test TestUseCompiler
* @bug 8086068
* @summary Tests execution with inconsistent UseCompiler flag combination.
* @run main/othervm -Xint -XX:+UseCompiler TestUseCompiler
* @run main/othervm -XX:+UseCompiler -Xint TestUseCompiler
*
* @run main/othervm -Xint -XX:+UseCompiler compiler.arguments.TestUseCompiler
* @run main/othervm -XX:+UseCompiler -Xint compiler.arguments.TestUseCompiler
*/

package compiler.arguments;

public class TestUseCompiler {

public static void main(String args[]) {
@ -26,20 +26,19 @@
* @bug 8031321
* @summary Verify processing of UseCountLeadingZerosInstruction option
* on CPU with LZCNT support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountLeadingZerosInstructionOnSupportedCPU
* BMISupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
*
* @build compiler.arguments.TestUseCountLeadingZerosInstructionOnSupportedCPU
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestUseCountLeadingZerosInstructionOnSupportedCPU
* compiler.arguments.TestUseCountLeadingZerosInstructionOnSupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
package compiler.arguments;

public class TestUseCountLeadingZerosInstructionOnSupportedCPU
extends BMISupportedCPUTest {

@ -26,20 +26,19 @@
* @bug 8031321
* @summary Verify processing of UseCountLeadingZerosInstruction option
* on CPU without LZCNT support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountLeadingZerosInstructionOnUnsupportedCPU
* BMIUnsupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
*
* @build compiler.arguments.TestUseCountLeadingZerosInstructionOnUnsupportedCPU
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestUseCountLeadingZerosInstructionOnUnsupportedCPU
* compiler.arguments.TestUseCountLeadingZerosInstructionOnUnsupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
package compiler.arguments;

public class TestUseCountLeadingZerosInstructionOnUnsupportedCPU
extends BMIUnsupportedCPUTest {

@ -26,21 +26,21 @@
* @bug 8031321
* @summary Verify processing of UseCountTrailingZerosInstruction option
* on CPU with TZCNT (BMI1 feature) support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountTrailingZerosInstructionOnSupportedCPU
* BMISupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
*
* @build compiler.arguments.TestUseCountTrailingZerosInstructionOnSupportedCPU
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestUseCountTrailingZerosInstructionOnSupportedCPU
* compiler.arguments.TestUseCountTrailingZerosInstructionOnSupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;

import jdk.test.lib.cli.CommandLineOptionTest;

public class TestUseCountTrailingZerosInstructionOnSupportedCPU
extends BMISupportedCPUTest {

@ -26,21 +26,21 @@
* @bug 8031321
* @summary Verify processing of UseCountTrailingZerosInstruction option
* on CPU without TZCNT instruction (BMI1 feature) support.
* @library /testlibrary /test/lib
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountTrailingZerosInstructionOnUnsupportedCPU
* BMIUnsupportedCPUTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
*
* @build compiler.arguments.TestUseCountTrailingZerosInstructionOnUnsupportedCPU
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestUseCountTrailingZerosInstructionOnUnsupportedCPU
* compiler.arguments.TestUseCountTrailingZerosInstructionOnUnsupportedCPU
*/

import sun.hotspot.cpuinfo.CPUInfo;
import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;

import jdk.test.lib.cli.CommandLineOptionTest;

public class TestUseCountTrailingZerosInstructionOnUnsupportedCPU
extends BMIUnsupportedCPUTest {
@ -25,10 +25,14 @@
* @test
* @bug 8073792
* @summary assert broken when array size becomes known during igvn
* @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCloneBadAssert.m TestArrayCloneBadAssert
*
* @run main/othervm -Xcomp
* -XX:CompileCommand=compileonly,compiler.arraycopy.TestArrayCloneBadAssert::m
* compiler.arraycopy.TestArrayCloneBadAssert
*/

package compiler.arraycopy;

public class TestArrayCloneBadAssert {

static final int[] array = new int[5];

@ -25,13 +25,22 @@
* @test
* @bug 6912521
* @summary small array copy as loads/stores
* @compile TestArrayCopyAsLoadsStores.java TestArrayCopyUtils.java
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestArrayCopyAsLoadsStores::m* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode -XX:TypeProfileLevel=200 TestArrayCopyAsLoadsStores
* @library /
*
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* -XX:CompileCommand=dontinline,compiler.arraycopy.TestArrayCopyAsLoadsStores::m*
* -XX:TypeProfileLevel=200
* compiler.arraycopy.TestArrayCopyAsLoadsStores
* @run main/othervm -ea -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* -XX:CompileCommand=dontinline,compiler.arraycopy.TestArrayCopyAsLoadsStores::m*
* -XX:TypeProfileLevel=200
* -XX:+IgnoreUnrecognizedVMOptions -XX:+StressArrayCopyMacroNode
* compiler.arraycopy.TestArrayCopyAsLoadsStores
*/

import java.util.*;
package compiler.arraycopy;

import java.util.Arrays;

public class TestArrayCopyAsLoadsStores extends TestArrayCopyUtils {

@ -25,10 +25,13 @@
* @test
* @bug 8073866
* @summary Fix for 8064703 may also cause stores between the allocation and arraycopy to be rexecuted after a deoptimization
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyBadReexec
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArrayCopyBadReexec
*/

package compiler.arraycopy;

public class TestArrayCopyBadReexec {

static int val;

@ -25,10 +25,13 @@
* @test
* @bug 7173584
* @summary arraycopy as macro node
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyMacro
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArrayCopyMacro
*/

package compiler.arraycopy;

public class TestArrayCopyMacro {
static class A {
}

@ -25,10 +25,13 @@
* @test
* @bug 8064703
* @summary Deoptimization between array allocation and arraycopy may result in non initialized array
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020 TestArrayCopyNoInit
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
* compiler.arraycopy.TestArrayCopyNoInit
*/

package compiler.arraycopy;

public class TestArrayCopyNoInit {

static int[] m1(int[] src) {

@ -25,22 +25,26 @@
* @test
* @bug 8072016
* @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
* @requires vm.flavor == "server"
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
* @build TestArrayCopyNoInitDeopt
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
*
* @build compiler.arraycopy.TestArrayCopyNoInitDeopt
* @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
* jdk.test.lib.Platform
* @run main/othervm -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
* TestArrayCopyNoInitDeopt
* compiler.arraycopy.TestArrayCopyNoInitDeopt
*/

import sun.hotspot.WhiteBox;
import sun.hotspot.code.NMethod;
import jdk.test.lib.Platform;
import java.lang.reflect.*;
package compiler.arraycopy;

import compiler.whitebox.CompilerWhiteBoxTest;
import jdk.test.lib.Platform;
import sun.hotspot.WhiteBox;

import java.lang.reflect.Method;

public class TestArrayCopyNoInitDeopt {

@ -83,9 +87,11 @@ public class TestArrayCopyNoInitDeopt {
}

static public void main(String[] args) throws Exception {
if (!Platform.isServer()) {
throw new Error("TESTBUG: Not server VM");
}
// Only execute if C2 is available
if (Platform.isServer() &&
TIERED_STOP_AT_LEVEL == CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION) {
if (TIERED_STOP_AT_LEVEL == CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION) {
int[] src = new int[10];
Object src_obj = new Object();
Method method_m1 = TestArrayCopyNoInitDeopt.class.getMethod("m1", Object.class);

@ -25,10 +25,13 @@
* @test
* @bug 8074676
* @summary after guards in Arrays.copyOf() intrinsic, control may become top
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOfStopped
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArrayCopyOfStopped
*/

package compiler.arraycopy;

import java.util.Arrays;

public class TestArrayCopyOfStopped {

@ -28,10 +28,13 @@
* are properly sign extended to 64 bit (e.g., PPC64, s390x). This can fail
* if slow_arraycopy_C() is commpiled by the C compiler without any imlicit
* casts (as spill stores to the stack that are done with 4-byte instruction).
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOverflowArguments
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArrayCopyOverflowArguments
*/

package compiler.arraycopy;

public class TestArrayCopyOverflowArguments {

// Without volatile the overflowing computation was moved up and then

@ -25,9 +25,13 @@
* @test
* @bug 8134468
* @summary test that checks whether an array load falls into the range of an arraycopy is incorrect on 32bits
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArrayCopyOverflowInBoundChecks
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArrayCopyOverflowInBoundChecks
*/

package compiler.arraycopy;

public class TestArrayCopyOverflowInBoundChecks {

static byte[] src_array = { 'a', 'b', 'c', 'd', 'e' };

@ -25,10 +25,16 @@
* @test
* @bug 8075921
* @summary control becomes top after arraycopy guards and confuses tighly coupled allocation logic
* @run main/othervm -Xcomp -XX:CompileOnly=TestArrayCopyStoppedAfterGuards.test,System.arraycopy TestArrayCopyStoppedAfterGuards
*
* @run main/othervm -Xcomp
* -XX:CompileCommand=compileonly,java.lang.System::arraycopy
* -XX:CompileCommand=compileonly,compiler.arraycopy.TestArrayCopyStoppedAfterGuards::test
* compiler.arraycopy.TestArrayCopyStoppedAfterGuards
*
*/

package compiler.arraycopy;

public class TestArrayCopyStoppedAfterGuards {

static void test() {

@ -21,9 +21,13 @@
* questions.
*/

import java.lang.annotation.*;
import java.lang.reflect.*;
import java.util.*;
package compiler.arraycopy;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.HashMap;

abstract class TestArrayCopyUtils {
public enum ArraySrc {

@ -25,10 +25,13 @@
* @test
* @bug 8055910
* @summary Arrays.copyOf doesn't perform subtype check
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestArraysCopyOfNoTypeCheck
*
* @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* compiler.arraycopy.TestArraysCopyOfNoTypeCheck
*/

package compiler.arraycopy;

import java.util.Arrays;

public class TestArraysCopyOfNoTypeCheck {

@ -25,10 +25,13 @@
* @test
* @bug 8080699
* @summary eliminated arraycopy node still reachable through exception edges
* @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestDeadArrayCopyOnMemChain
*
* @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation
* compiler.arraycopy.TestDeadArrayCopyOnMemChain
*/

package compiler.arraycopy;

public class TestDeadArrayCopyOnMemChain {
static class A {
int f;
Some files were not shown because too many files have changed in this diff.