Merge

commit aa0d0aa80d

Changed files:

hotspot
  make/share/makefiles
  makefiles
  src
    cpu
      aarch64/vm
        aarch64.ad
        assembler_aarch64.hpp
        c1_LIRAssembler_aarch64.cpp
        c2_globals_aarch64.hpp
        debug_aarch64.cpp
        frame_aarch64.hpp
        macroAssembler_aarch64.cpp
        macroAssembler_aarch64.hpp
        nativeInst_aarch64.hpp
        sharedRuntime_aarch64.cpp
        stubGenerator_aarch64.cpp
        templateInterpreterGenerator_aarch64.cpp
        vm_version_aarch64.hpp
      ppc/vm
        assembler_ppc.hpp
        assembler_ppc.inline.hpp
        c2_globals_ppc.hpp
        debug_ppc.cpp
        frame_ppc.hpp
        nativeInst_ppc.hpp
        sharedRuntime_ppc.cpp
        stubGenerator_ppc.cpp
        vm_version_ppc.cpp
      sparc/vm
        c2_globals_sparc.hpp
        debug_sparc.cpp
        frame_sparc.hpp
        metaspaceShared_sparc.cpp
        nativeInst_sparc.hpp
        sharedRuntime_sparc.cpp
        stubGenerator_sparc.cpp
      x86/vm
        assembler_x86.cpp
        assembler_x86.hpp
        c2_globals_x86.hpp
        debug_x86.cpp
        frame_x86.hpp
        macroAssembler_x86.hpp
        nativeInst_x86.hpp
        sharedRuntime_x86_32.cpp
        sharedRuntime_x86_64.cpp
        stubGenerator_x86_32.cpp
        stubGenerator_x86_64.cpp
      zero/vm
    jdk.hotspot.agent/share/classes/sun/jvm/hotspot
    jdk.vm.ci/share/classes
      jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot
      jdk.vm.ci.meta/src/jdk/vm/ci/meta
    os
      aix/vm
      bsd/vm
      linux/vm
      posix/vm
      solaris/vm
      windows/vm
    os_cpu
      bsd_x86/vm
      linux_aarch64/vm
      linux_sparc/vm
      linux_x86/vm
      solaris_sparc/vm
      solaris_x86/vm
      windows_x86/vm
    share/vm
      asm
      c1
      ci
      classfile
        classFileStream.hpp
        javaClasses.cpp
        javaClasses.hpp
        javaClasses.inline.hpp
        systemDictionary.cpp
        systemDictionary.hpp
      code
      compiler
      gc
@@ -41,7 +41,6 @@
JVM_DumpAllStacks;
JVM_DumpThreads;
JVM_FillInStackTrace;
JVM_FillStackFrames;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromBootLoader;
@@ -157,13 +156,13 @@
JVM_SetClassSigners;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetMethodInfo;
JVM_SetThreadPriority;
JVM_Sleep;
JVM_StartThread;
JVM_StopThread;
JVM_SuspendThread;
JVM_SupportsCX8;
JVM_ToStackTraceElement;
JVM_TotalMemory;
JVM_UnloadLibrary;
JVM_Yield;
@@ -41,9 +41,8 @@ ifeq ($(call check-jvm-feature, compiler2), true)
    ADLC_CFLAGS := -m64
    ADLC_CFLAGS_WARNINGS := +w
  else ifeq ($(OPENJDK_BUILD_OS), aix)
    # FIXME: Not implemented. These flags are likely, however
    # ADLC_LDFLAGS := -q64
    # ADLC_CFLAGS := -qnortti -qnoeh -q64
    ADLC_LDFLAGS := -q64
    ADLC_CFLAGS := -qnortti -qeh -q64 -DAIX
  else ifeq ($(OPENJDK_BUILD_OS), windows)
    ADLC_LDFLAGS := -nologo
    ADLC_CFLAGS := -nologo -EHsc
@@ -89,7 +88,7 @@ ifeq ($(call check-jvm-feature, compiler2), true)
  else ifeq ($(OPENJDK_TARGET_OS), solaris)
    ADLCFLAGS += -DSOLARIS=1 -DSPARC_WORKS=1
  else ifeq ($(OPENJDK_TARGET_OS), aix)
    # FIXME: Not implemented
    ADLCFLAGS += -DAIX=1
  else ifeq ($(OPENJDK_TARGET_OS), macosx)
    ADLCFLAGS += -D_ALLBSD_SOURCE=1 -D_GNU_SOURCE=1
  endif
@@ -58,7 +58,6 @@ JVM_DoPrivileged
JVM_DumpAllStacks
JVM_DumpThreads
JVM_FillInStackTrace
JVM_FillStackFrames
JVM_FindClassFromCaller
JVM_FindClassFromClass
JVM_FindLibraryEntry
@@ -169,7 +168,6 @@ JVM_ReleaseUTF
JVM_ResumeThread
JVM_SetArrayElement
JVM_SetClassSigners
JVM_SetMethodInfo
JVM_SetNativeThreadName
JVM_SetPrimitiveArrayElement
JVM_SetThreadPriority
@@ -178,6 +176,7 @@ JVM_StartThread
JVM_StopThread
JVM_SupportsCX8
JVM_SuspendThread
JVM_ToStackTraceElement
JVM_TotalMemory
JVM_UnloadLibrary
JVM_Yield
@@ -14242,6 +14242,48 @@ instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
  match(If cmp (CmpU op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
            || n->in(1)->as_Bool()->_test._test == BoolTest::eq
            || n->in(1)->as_Bool()->_test._test == BoolTest::gt
            || n->in(1)->as_Bool()->_test._test == BoolTest::le);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Test bit and Branch

// Patterns for short (< 32KiB) variants
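One observation these patterns depend on (not stated in the hunk): for an unsigned value, "<= 0" is the same test as "== 0" and "> 0" the same as "!= 0", which is why the EQ/LS conditions pick cbz(w) and the others pick cbnz(w). A tiny standalone check, illustrative only:

#include <cassert>
#include <cstdint>

// For unsigned x, (x <= 0) == (x == 0) and (x > 0) == (x != 0), so the
// BoolTest::le/eq cases above map to cbz(w) and gt/ne map to cbnz(w).
int main() {
  const uint32_t vals[] = {0u, 1u, 0xffffffffu};
  for (uint32_t x : vals) {
    assert((x <= 0u) == (x == 0u));
    assert((x >  0u) == (x != 0u));
  }
  return 0;
}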
@@ -1221,6 +1221,38 @@ public:
  INSN(caspal, true, true)
#undef INSN

  // 8.1 Atomic operations
  void lse_atomic(Register Rs, Register Rt, Register Rn,
                  enum operand_size sz, int op1, int op2, bool a, bool r) {
    starti;
    f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
    rf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), rf(Rn, 5), zrf(Rt, 0);
  }

#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2)                   \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false);                 \
  }                                                                     \
  void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false);                  \
  }                                                                     \
  void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true);                  \
  }                                                                     \
  void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true);                   \
  }
  INSN(ldadd,  ldadda,  ldaddl,  ldaddal,  0, 0b000);
  INSN(ldbic,  ldbica,  ldbicl,  ldbical,  0, 0b001);
  INSN(ldeor,  ldeora,  ldeorl,  ldeoral,  0, 0b010);
  INSN(ldorr,  ldorra,  ldorrl,  ldorral,  0, 0b011);
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
  INSN(swp,    swpa,    swpl,    swpal,    1, 0b000);
#undef INSN

  // Load register (literal)
#define INSN(NAME, opc, V)                                        \
  void NAME(Register Rt, address dest) {                          \
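As a cross-check of the bit layout in lse_atomic() above, here is a standalone encoder with the same field positions (positions copied from the hunk; the example operands in main() are made up for illustration):

#include <cstdint>
#include <cstdio>

// Mirrors the f()/rf() calls above: size(31:30), 111000(29:24), A(23),
// R(22), 1(21), Rs(20:16), op1(15), op2(14:12), 00(11:10), Rn(9:5), Rt(4:0).
static uint32_t lse_encode(unsigned sz, unsigned a, unsigned r,
                           unsigned rs, unsigned op1, unsigned op2,
                           unsigned rn, unsigned rt) {
  uint32_t insn = 0;
  insn |= (sz  & 0x3)  << 30;   // operand size (word/xword)
  insn |= 0b111000u    << 24;   // fixed opcode bits
  insn |= (a   & 1)    << 23;   // acquire
  insn |= (r   & 1)    << 22;   // release
  insn |= 1u           << 21;   // fixed
  insn |= (rs  & 0x1f) << 16;   // source operand register
  insn |= (op1 & 1)    << 15;
  insn |= (op2 & 0x7)  << 12;
  insn |= (rn  & 0x1f) << 5;    // base address register
  insn |= (rt  & 0x1f);         // destination register
  return insn;
}

int main() {
  // e.g. an acquire+release ldadd on a 32-bit operand (values assumed)
  printf("0x%08x\n", lse_encode(0b10, 1, 1, 1, 0, 0b000, 0, 2));
  return 0;
}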
@@ -1556,54 +1556,14 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}

void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  if (UseLSE) {
    __ mov(rscratch1, cmpval);
    __ casal(Assembler::word, rscratch1, newval, addr);
    __ cmpw(rscratch1, cmpval);
    __ cset(rscratch1, Assembler::NE);
  } else {
    Label retry_load, nope;
    // flush and load exclusive from the memory location
    // and fail if it is not what we expect
    __ prfm(Address(addr), PSTL1STRM);
    __ bind(retry_load);
    __ ldaxrw(rscratch1, addr);
    __ cmpw(rscratch1, cmpval);
    __ cset(rscratch1, Assembler::NE);
    __ br(Assembler::NE, nope);
    // if we store+flush with no intervening write rscratch1 will be zero
    __ stlxrw(rscratch1, newval, addr);
    // retry so we only ever return after a load fails to compare
    // ensures we don't return a stale value after a failed write.
    __ cbnzw(rscratch1, retry_load);
    __ bind(nope);
  }
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  if (UseLSE) {
    __ mov(rscratch1, cmpval);
    __ casal(Assembler::xword, rscratch1, newval, addr);
    __ cmp(rscratch1, cmpval);
    __ cset(rscratch1, Assembler::NE);
  } else {
    Label retry_load, nope;
    // flush and load exclusive from the memory location
    // and fail if it is not what we expect
    __ prfm(Address(addr), PSTL1STRM);
    __ bind(retry_load);
    __ ldaxr(rscratch1, addr);
    __ cmp(rscratch1, cmpval);
    __ cset(rscratch1, Assembler::NE);
    __ br(Assembler::NE, nope);
    // if we store+flush with no intervening write rscratch1 will be zero
    __ stlxr(rscratch1, newval, addr);
    // retry so we only ever return after a load fails to compare
    // ensures we don't return a stale value after a failed write.
    __ cbnz(rscratch1, retry_load);
    __ bind(nope);
  }
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
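Because the +/- markers were lost in extraction, the old LL/SC loop and the new single cmpxchg() call appear interleaved above; the net change is that both CAS helpers collapse into one cmpxchg() call. A rough std::atomic model of the resulting contract — assuming, as the cset(NE) suggests, that 0 means success and 1 means failure:

#include <atomic>
#include <cstdint>

// Sketch only; the register names in comments refer to the hunk above.
static int cas_word(std::atomic<uint32_t>* addr,
                    uint32_t cmpval, uint32_t newval) {
  uint32_t expected = cmpval;
  bool ok = addr->compare_exchange_strong(
      expected, newval,
      std::memory_order_acq_rel,      // acquire = true, release = true
      std::memory_order_acquire);
  return ok ? 0 : 1;                  // cset(rscratch1, Assembler::NE)
}

int main() {
  std::atomic<uint32_t> v{7};
  int failed = cas_word(&v, 7, 9);
  return (failed == 0 && v.load() == 9) ? 0 : 1;
}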
@@ -3121,38 +3081,32 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  void (MacroAssembler::* lda)(Register Rd, Register Ra);
  void (MacroAssembler::* add)(Register Rd, Register Rn, RegisterOrConstant increment);
  void (MacroAssembler::* stl)(Register Rs, Register Rt, Register Rn);
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    lda = &MacroAssembler::ldaxrw;
    add = &MacroAssembler::addw;
    stl = &MacroAssembler::stlxrw;
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    lda = &MacroAssembler::ldaxr;
    add = &MacroAssembler::add;
    stl = &MacroAssembler::stlxr;
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    if (UseCompressedOops) {
      lda = &MacroAssembler::ldaxrw;
      add = &MacroAssembler::addw;
      stl = &MacroAssembler::stlxrw;
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      lda = &MacroAssembler::ldaxr;
      add = &MacroAssembler::add;
      stl = &MacroAssembler::stlxr;
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    lda = &MacroAssembler::ldaxr;
    add = &MacroAssembler::add;
    stl = &MacroAssembler::stlxr; // unreachable
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
@@ -3170,14 +3124,8 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
      assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                 rscratch1, rscratch2);
    }
    Label again;
    __ lea(tmp, addr);
    __ prfm(Address(tmp), PSTL1STRM);
    __ bind(again);
    (_masm->*lda)(dst, tmp);
    (_masm->*add)(rscratch1, dst, inc);
    (_masm->*stl)(rscratch2, rscratch1, tmp);
    __ cbnzw(rscratch2, again);
    (_masm->*add)(dst, inc, tmp);
    break;
  }
  case lir_xchg:
@@ -3186,17 +3134,12 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
    Register obj = as_reg(data);
    Register dst = as_reg(dest);
    if (is_oop && UseCompressedOops) {
      __ encode_heap_oop(rscratch1, obj);
      obj = rscratch1;
      __ encode_heap_oop(rscratch2, obj);
      obj = rscratch2;
    }
    assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
    Label again;
    assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
    __ lea(tmp, addr);
    __ prfm(Address(tmp), PSTL1STRM);
    __ bind(again);
    (_masm->*lda)(dst, tmp);
    (_masm->*stl)(rscratch2, obj, tmp);
    __ cbnzw(rscratch2, again);
    (_masm->*xchg)(dst, obj, tmp);
    if (is_oop && UseCompressedOops) {
      __ decode_heap_oop(dst);
    }
@@ -55,6 +55,7 @@ define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(intx, LoopPercentProfileLimit, 10);
define_pd_global(intx, PostLoopMultiversioning, false);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
@@ -30,6 +30,5 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"

void pd_ps(frame f) {}
@@ -27,7 +27,6 @@
#define CPU_AARCH64_VM_FRAME_AARCH64_HPP

#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

// A frame represents a physical stack frame (an activation). Frames can be
// C or Java frames, and the Java frames can be interpreted or compiled.
@@ -1637,6 +1637,11 @@ Address MacroAssembler::form_address(Register Rd, Register base, long byte_offse
}

void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
  if (UseLSE) {
    mov(tmp, 1);
    ldadd(Assembler::word, tmp, zr, counter_addr);
    return;
  }
  Label retry_load;
  prfm(Address(counter_addr), PSTL1STRM);
  bind(retry_load);
@@ -2172,8 +2177,18 @@ static bool different(Register a, RegisterOrConstant b, Register c) {
  return a != b.as_register() && a != c && b.as_register() != c;
}

#define ATOMIC_OP(LDXR, OP, IOP, STXR)                                  \
void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
#define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz)                   \
void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    if (incr.is_register()) {                                           \
      AOP(sz, incr.as_register(), prev, addr);                          \
    } else {                                                            \
      mov(rscratch2, incr.as_constant());                               \
      AOP(sz, rscratch2, prev, addr);                                   \
    }                                                                   \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, incr, addr) ? prev : rscratch2;            \
@@ -2190,13 +2205,20 @@ void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Registe
  }                                                                     \
}

ATOMIC_OP(ldxr, add, sub, stxr)
ATOMIC_OP(ldxrw, addw, subw, stxrw)
ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword)
ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word)
ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword)
ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word)

#undef ATOMIC_OP

#define ATOMIC_XCHG(OP, LDXR, STXR)                                     \
#define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz)                            \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
  if (UseLSE) {                                                         \
    prev = prev->is_valid() ? prev : zr;                                \
    AOP(sz, newv, prev, addr);                                          \
    return;                                                             \
  }                                                                     \
  Register result = rscratch2;                                          \
  if (prev->is_valid())                                                 \
    result = different(prev, newv, addr) ? prev : rscratch2;            \
@@ -2211,8 +2233,10 @@ void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) {
  mov(prev, result);                                                    \
}

ATOMIC_XCHG(xchg, ldxr, stxr)
ATOMIC_XCHG(xchgw, ldxrw, stxrw)
ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG
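A toy reduction of the ATOMIC_OP pattern above, showing how one macro stamps out a family of named atomics and how the LSE path short-circuits the retry loop (std::atomic stands in for the emitted ldaddal/ldaxr/stlxr instructions; purely illustrative):

#include <atomic>
#include <cstdio>

static bool UseLSE = true;   /* runtime CPU-feature flag, as in HotSpot */

#define ATOMIC_OP(NAME, DELTA)                                          \
  long atomic_##NAME(std::atomic<long>* addr) {                         \
    if (UseLSE) {                                                       \
      /* single-instruction path (ldaddal on real hardware) */          \
      return addr->fetch_add((DELTA), std::memory_order_acq_rel);       \
    }                                                                   \
    /* fallback: a CAS retry loop, like the ldaxr/stlxr sequence */     \
    long old = addr->load(std::memory_order_relaxed);                   \
    while (!addr->compare_exchange_weak(old, old + (DELTA),             \
                                        std::memory_order_acq_rel,      \
                                        std::memory_order_relaxed)) {}  \
    return old;                                                         \
  }

ATOMIC_OP(add,  1)
ATOMIC_OP(sub, -1)
#undef ATOMIC_OP

int main() {
  std::atomic<long> v{41};
  atomic_add(&v);
  printf("%ld\n", v.load());  // 42
  return 0;
}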
@@ -957,9 +957,13 @@ public:

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
    ldr(rscratch2, adr);
@@ -30,7 +30,6 @@
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
@@ -198,6 +198,16 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ mov(rscratch1, destination);
  __ br(rscratch1);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
@@ -39,7 +39,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@@ -1711,20 +1710,42 @@ class StubGenerator: public StubCodeGenerator {
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry) {
#ifdef PRODUCT
    return StubRoutines::_jbyte_arraycopy;
#else
                               address byte_copy_entry,
                               address short_copy_entry,
                               address int_copy_entry,
                               address long_copy_entry) {
    Label L_long_aligned, L_int_aligned, L_short_aligned;
    Register s = c_rarg0, d = c_rarg1, count = c_rarg2;

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    __ lea(rscratch2, ExternalAddress((address)&SharedRuntime::_unsafe_array_copy_ctr));
    __ incrementw(Address(rscratch2));
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ orr(rscratch1, s, d);
    __ orr(rscratch1, rscratch1, count);

    __ andr(rscratch1, rscratch1, BytesPerLong-1);
    __ cbz(rscratch1, L_long_aligned);
    __ andr(rscratch1, rscratch1, BytesPerInt-1);
    __ cbz(rscratch1, L_int_aligned);
    __ tbz(rscratch1, 0, L_short_aligned);
    __ b(RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ lsr(count, count, LogBytesPerShort); // size => short_count
    __ b(RuntimeAddress(short_copy_entry));
    __ BIND(L_int_aligned);
    __ lsr(count, count, LogBytesPerInt); // size => int_count
    __ b(RuntimeAddress(int_copy_entry));
    __ BIND(L_long_aligned);
    __ lsr(count, count, LogBytesPerLong); // size => long_count
    __ b(RuntimeAddress(long_copy_entry));

    return start;
#endif
  }

  //
@@ -2090,7 +2111,10 @@ class StubGenerator: public StubCodeGenerator {
                                                              /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
                                                           entry_jbyte_arraycopy);
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_jlong_arraycopy);

    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
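The orr/andr sequence in generate_unsafe_copy() above relies on a standard trick: OR-ing source address, destination address, and byte count makes the combined low bits zero only when all three share that alignment. A minimal model (the function name is ours, not HotSpot's):

#include <cstdint>
#include <cstdio>

// Mirrors the dispatch above: test 8-byte alignment, then 4-byte, then
// 2-byte (the tbz on bit 0), falling back to a byte copy otherwise.
static const char* pick_copy_loop(uintptr_t s, uintptr_t d, size_t count) {
  uintptr_t bits = s | d | count;
  if ((bits & (sizeof(int64_t) - 1)) == 0) return "long copy";
  if ((bits & (sizeof(int32_t) - 1)) == 0) return "int copy";
  if ((bits & 1) == 0)                     return "short copy";
  return "byte copy";
}

int main() {
  printf("%s\n", pick_copy_loop(0x1000, 0x2000, 64));  // long copy
  printf("%s\n", pick_copy_loop(0x1001, 0x2000, 64));  // byte copy
  return 0;
}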
@@ -1983,14 +1983,8 @@ void TemplateInterpreterGenerator::count_bytecode() {
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  Label L;
  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
  __ prfm(Address(rscratch2), PSTL1STRM);
  __ bind(L);
  __ ldxr(rscratch1, rscratch2);
  __ add(rscratch1, rscratch1, 1);
  __ stxr(rscratch3, rscratch1, rscratch2);
  __ cbnzw(rscratch3, L);
  __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, rscratch3);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
@@ -73,6 +73,7 @@ public:
    CPU_SHA1         = (1<<5),
    CPU_SHA2         = (1<<6),
    CPU_CRC32        = (1<<7),
    CPU_LSE          = (1<<8),
    CPU_A53MAC       = (1 << 30),
    CPU_DMB_ATOMICS  = (1 << 31),
  };
@@ -624,6 +624,7 @@ class Assembler : public AbstractAssembler {
    VNOR_OPCODE  = (4u << OPCODE_SHIFT | 1284u ),
    VOR_OPCODE   = (4u << OPCODE_SHIFT | 1156u ),
    VXOR_OPCODE  = (4u << OPCODE_SHIFT | 1220u ),
    VRLD_OPCODE  = (4u << OPCODE_SHIFT |  196u ),
    VRLB_OPCODE  = (4u << OPCODE_SHIFT |    4u ),
    VRLW_OPCODE  = (4u << OPCODE_SHIFT |  132u ),
    VRLH_OPCODE  = (4u << OPCODE_SHIFT |   68u ),
@@ -2047,6 +2048,7 @@ class Assembler : public AbstractAssembler {
  inline void vnor( VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vor(  VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vxor( VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vrld( VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vrlb( VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vrlw( VectorRegister d, VectorRegister a, VectorRegister b);
  inline void vrlh( VectorRegister d, VectorRegister a, VectorRegister b);

@@ -839,6 +839,7 @@ inline void Assembler::vandc( VectorRegister d, VectorRegist
inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vxor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrld( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -55,6 +55,7 @@ define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(intx, LoopPercentProfileLimit, 10);
define_pd_global(intx, PostLoopMultiversioning, false);

// Peephole and CISC spilling both break the graph, and so make the
// scheduler sick.
@@ -30,6 +30,5 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"

void pd_ps(frame f) {}
@@ -27,7 +27,6 @@
#define CPU_PPC_VM_FRAME_PPC_HPP

#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

// C frame layout on PPC-64.
//
@@ -31,7 +31,6 @@
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
//
@@ -483,6 +483,18 @@ bool SharedRuntime::is_wide_vector(int size) {
  assert(size <= 8, "%d bytes vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return Assembler::load_const_size + 8;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  Register Rtemp = R12;
  __ load_const(Rtemp, destination);
  __ mtctr(Rtemp);
  __ bctr();
}

#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
@@ -37,7 +37,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#include "runtime/thread.inline.hpp"

#define __ _masm->
@@ -2417,6 +2416,433 @@ class StubGenerator: public StubCodeGenerator {
    return start;
  }

  // Arguments for generated stub (little endian only):
  //   R3_ARG1   - source byte array address
  //   R4_ARG2   - destination byte array address
  //   R5_ARG3   - round key array
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");

    address start = __ function_entry();

    Label L_doLast;

    Register from           = R3_ARG1;  // source array address
    Register to             = R4_ARG2;  // destination array address
    Register key            = R5_ARG3;  // round key array

    Register keylen         = R8;
    Register temp           = R9;
    Register keypos         = R10;
    Register hex            = R11;
    Register fifteen        = R12;

    VectorRegister vRet     = VR0;

    VectorRegister vKey1    = VR1;
    VectorRegister vKey2    = VR2;
    VectorRegister vKey3    = VR3;
    VectorRegister vKey4    = VR4;

    VectorRegister fromPerm = VR5;
    VectorRegister keyPerm  = VR6;
    VectorRegister toPerm   = VR7;
    VectorRegister fSplt    = VR8;

    VectorRegister vTmp1    = VR9;
    VectorRegister vTmp2    = VR10;
    VectorRegister vTmp3    = VR11;
    VectorRegister vTmp4    = VR12;

    VectorRegister vLow     = VR13;
    VectorRegister vHigh    = VR14;

    __ li (hex, 16);
    __ li (fifteen, 15);
    __ vspltisb (fSplt, 0x0f);

    // load unaligned from[0-15] to vsRet
    __ lvx (vRet, from);
    __ lvx (vTmp1, fifteen, from);
    __ lvsl (fromPerm, from);
    __ vxor (fromPerm, fromPerm, fSplt);
    __ vperm (vRet, vRet, vTmp1, fromPerm);

    // load keylen (44 or 52 or 60)
    __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);

    // to load keys
    __ lvsr (keyPerm, key);
    __ vxor (vTmp2, vTmp2, vTmp2);
    __ vspltisb (vTmp2, -16);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vsldoi (keyPerm, keyPerm, keyPerm, -8);

    // load the 1st round key to vKey1
    __ li (keypos, 0);
    __ lvx (vKey1, keypos, key);
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey1, vTmp1, vKey1, keyPerm);

    // 1st round
    __ vxor (vRet, vRet, vKey1);

    // load the 2nd round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 3rd round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 4th round key to vKey3
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 5th round key to vKey4
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // 2nd - 5th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);
    __ vcipher (vRet, vRet, vKey3);
    __ vcipher (vRet, vRet, vKey4);

    // load the 6th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 7th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 8th round key to vKey3
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 9th round key to vKey4
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // 6th - 9th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);
    __ vcipher (vRet, vRet, vKey3);
    __ vcipher (vRet, vRet, vKey4);

    // load the 10th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // if all round keys are loaded, skip next 4 rounds
    __ cmpwi (CCR0, keylen, 44);
    __ beq (CCR0, L_doLast);

    // 10th - 11th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);

    // load the 12th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 13th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // if all round keys are loaded, skip next 2 rounds
    __ cmpwi (CCR0, keylen, 52);
    __ beq (CCR0, L_doLast);

    // 12th - 13th rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipher (vRet, vRet, vKey2);

    // load the 14th round key to vKey1
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 15th round key to vKey2
    __ addi (keypos, keypos, 16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    __ bind(L_doLast);

    // last two rounds
    __ vcipher (vRet, vRet, vKey1);
    __ vcipherlast (vRet, vRet, vKey2);

    __ neg (temp, to);
    __ lvsr (toPerm, temp);
    __ vspltisb (vTmp2, -1);
    __ vxor (vTmp1, vTmp1, vTmp1);
    __ vperm (vTmp2, vTmp2, vTmp1, toPerm);
    __ vxor (toPerm, toPerm, fSplt);
    __ lvx (vTmp1, to);
    __ vperm (vRet, vRet, vRet, toPerm);
    __ vsel (vTmp1, vTmp1, vRet, vTmp2);
    __ lvx (vTmp4, fifteen, to);
    __ stvx (vTmp1, to);
    __ vsel (vRet, vRet, vTmp4, vTmp2);
    __ stvx (vRet, fifteen, to);

    __ blr();
    return start;
  }
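Both this stub and the decrypt stub below branch on keylen, the length in ints of the expanded key array. A quick sanity check of the mapping the branches rely on (standard AES schedule sizes, stated here as background; the helper name is ours):

#include <cassert>

// The expanded AES key holds 4*(rounds+1) 32-bit words, so the 44/52/60
// values the stubs compare against correspond to AES-128/192/256.
static int rounds_for(int keylen_words) {
  return keylen_words / 4 - 1;
}

int main() {
  assert(rounds_for(44) == 10);  // AES-128
  assert(rounds_for(52) == 12);  // AES-192
  assert(rounds_for(60) == 14);  // AES-256
  return 0;
}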

  // Arguments for generated stub (little endian only):
  //   R3_ARG1   - source byte array address
  //   R4_ARG2   - destination byte array address
  //   R5_ARG3   - K (key) in little endian int array
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");

    address start = __ function_entry();

    Label L_doLast;
    Label L_do44;
    Label L_do52;
    Label L_do60;

    Register from           = R3_ARG1;  // source array address
    Register to             = R4_ARG2;  // destination array address
    Register key            = R5_ARG3;  // round key array

    Register keylen         = R8;
    Register temp           = R9;
    Register keypos         = R10;
    Register hex            = R11;
    Register fifteen        = R12;

    VectorRegister vRet     = VR0;

    VectorRegister vKey1    = VR1;
    VectorRegister vKey2    = VR2;
    VectorRegister vKey3    = VR3;
    VectorRegister vKey4    = VR4;
    VectorRegister vKey5    = VR5;

    VectorRegister fromPerm = VR6;
    VectorRegister keyPerm  = VR7;
    VectorRegister toPerm   = VR8;
    VectorRegister fSplt    = VR9;

    VectorRegister vTmp1    = VR10;
    VectorRegister vTmp2    = VR11;
    VectorRegister vTmp3    = VR12;
    VectorRegister vTmp4    = VR13;

    VectorRegister vLow     = VR14;
    VectorRegister vHigh    = VR15;

    __ li (hex, 16);
    __ li (fifteen, 15);
    __ vspltisb (fSplt, 0x0f);

    // load unaligned from[0-15] to vsRet
    __ lvx (vRet, from);
    __ lvx (vTmp1, fifteen, from);
    __ lvsl (fromPerm, from);
    __ vxor (fromPerm, fromPerm, fSplt);
    __ vperm (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]

    // load keylen (44 or 52 or 60)
    __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);

    // to load keys
    __ lvsr (keyPerm, key);
    __ vxor (vTmp2, vTmp2, vTmp2);
    __ vspltisb (vTmp2, -16);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vrld (keyPerm, keyPerm, vTmp2);
    __ vsldoi (keyPerm, keyPerm, keyPerm, -8);

    __ cmpwi (CCR0, keylen, 44);
    __ beq (CCR0, L_do44);

    __ cmpwi (CCR0, keylen, 52);
    __ beq (CCR0, L_do52);

    // load the 15th round key to vKey11
    __ li (keypos, 240);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 14th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 13th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // load the 12th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey5, vTmp1, vTmp2, keyPerm);

    // 1st - 5th rounds
    __ vxor (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipher (vRet, vRet, vKey5);

    __ b (L_doLast);

    __ bind (L_do52);

    // load the 13th round key to vKey11
    __ li (keypos, 208);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 12th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 11th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // 1st - 3rd rounds
    __ vxor (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);

    __ b (L_doLast);

    __ bind (L_do44);

    // load the 11th round key to vKey11
    __ li (keypos, 176);
    __ lvx (vTmp1, keypos, key);
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // 1st round
    __ vxor (vRet, vRet, vKey1);

    __ bind (L_doLast);

    // load the 10th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey1, vTmp2, vTmp1, keyPerm);

    // load the 9th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey2, vTmp1, vTmp2, keyPerm);

    // load the 8th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey3, vTmp2, vTmp1, keyPerm);

    // load the 7th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey4, vTmp1, vTmp2, keyPerm);

    // load the 6th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey5, vTmp2, vTmp1, keyPerm);

    // last 10th - 6th rounds
    __ vncipher (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipher (vRet, vRet, vKey5);

    // load the 5th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey1, vTmp1, vTmp2, keyPerm);

    // load the 4th round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey2, vTmp2, vTmp1, keyPerm);

    // load the 3rd round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey3, vTmp1, vTmp2, keyPerm);

    // load the 2nd round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp1, keypos, key);
    __ vperm (vKey4, vTmp2, vTmp1, keyPerm);

    // load the 1st round key to vKey10
    __ addi (keypos, keypos, -16);
    __ lvx (vTmp2, keypos, key);
    __ vperm (vKey5, vTmp1, vTmp2, keyPerm);

    // last 5th - 1st rounds
    __ vncipher (vRet, vRet, vKey1);
    __ vncipher (vRet, vRet, vKey2);
    __ vncipher (vRet, vRet, vKey3);
    __ vncipher (vRet, vRet, vKey4);
    __ vncipherlast (vRet, vRet, vKey5);

    __ neg (temp, to);
    __ lvsr (toPerm, temp);
    __ vspltisb (vTmp2, -1);
    __ vxor (vTmp1, vTmp1, vTmp1);
    __ vperm (vTmp2, vTmp2, vTmp1, toPerm);
    __ vxor (toPerm, toPerm, fSplt);
    __ lvx (vTmp1, to);
    __ vperm (vRet, vRet, vRet, toPerm);
    __ vsel (vTmp1, vTmp1, vRet, vTmp2);
    __ lvx (vTmp4, fifteen, to);
    __ stvx (vTmp1, to);
    __ vsel (vRet, vRet, vTmp4, vTmp2);
    __ stvx (vRet, fifteen, to);

    __ blr();
    return start;
  }

  void generate_arraycopy_stubs() {
    // Note: the disjoint stubs must be generated first, some of
@@ -2693,10 +3119,6 @@ class StubGenerator: public StubCodeGenerator {
    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    if (UseAESIntrinsics) {
      guarantee(!UseAESIntrinsics, "not yet implemented.");
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
@@ -2719,6 +3141,12 @@ class StubGenerator: public StubCodeGenerator {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }

    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
    }

  }

 public:
@@ -122,7 +122,7 @@ void VM_Version::initialize() {
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " vcipher" : ""),
               (has_vcipher() ? " aes"     : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : ""),
               (has_mfdscr()  ? " mfdscr"  : "")
@@ -186,6 +186,28 @@ void VM_Version::initialize() {
  }

  // The AES intrinsic stubs require AES instruction support.
#if defined(VM_LITTLE_ENDIAN)
  if (has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAES && has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

#else
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
@@ -195,6 +217,7 @@ void VM_Version::initialize() {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }
#endif

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
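The shape of this feature gating is worth spelling out: enable a flag automatically only if the user left it at its default, and warn before forcing it off if the user explicitly requested an unsupported feature. A toy model (Flag and gate are our names, not HotSpot API):

#include <cstdio>

struct Flag { bool value; bool is_default; };

// Mirrors the has_vcipher()/FLAG_IS_DEFAULT/FLAG_SET_DEFAULT dance above.
static void gate(Flag& f, bool cpu_has_it, const char* what) {
  if (cpu_has_it) {
    if (f.is_default) f.value = true;     // auto-enable on capable CPUs
  } else if (f.value) {
    if (!f.is_default)                    // only warn on explicit requests
      printf("warning: %s not available on this CPU\n", what);
    f.value = false;                      // FLAG_SET_DEFAULT(..., false)
  }
}

int main() {
  Flag UseAES{false, true};
  gate(UseAES, /*has_vcipher=*/true, "AES");
  printf("UseAES=%d\n", UseAES.value);    // 1
  return 0;
}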
@@ -53,6 +53,7 @@ define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(intx, LoopUnrollLimit, 60); // Design center runs on 1.3.1
define_pd_global(intx, LoopPercentProfileLimit, 10);
define_pd_global(intx, PostLoopMultiversioning, false);
define_pd_global(intx, MinJumpTableSize, 5);

// Peephole and CISC spilling both break the graph, and so makes the
@@ -29,7 +29,6 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"

#ifndef PRODUCT
@@ -26,7 +26,6 @@
#define CPU_SPARC_VM_FRAME_SPARC_HPP

#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

// A frame represents a physical stack frame (an activation). Frames can be
// C or Java frames, and the Java frames can be interpreted or compiled.
@@ -65,8 +65,6 @@ void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
  *vtable = dummy_vtable;
  *md_top += vtable_bytes;

  guarantee(*md_top <= md_end, "Insufficient space for vtables.");

  // Get ready to generate dummy methods.

  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
@@ -29,7 +29,6 @@
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interface for the following instructions:
// - NativeInstruction
@@ -324,6 +324,16 @@ bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 40;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ set((intptr_t)destination, G3_scratch);
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
@@ -37,7 +37,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

(File diff suppressed because it is too large.)
@@ -1977,39 +1977,43 @@ private:
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  // 128bit copy from/to 256bit (YMM) vector registers
  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  // vinserti forms
  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);

  // 256bit copy from/to 512bit (ZMM) vector registers
  void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);

  // 128bit copy from/to 256bit (YMM) or 512bit (ZMM) vector registers
  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
  // vinsertf forms
  void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
  void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);

  // duplicate 4-bytes integer data from src into 8 locations in dest
  // vextracti forms
  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);

  // vextractf forms
  void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
  void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
  void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);

  // legacy xmm sourced word/dword replicate
  void vpbroadcastw(XMMRegister dst, XMMRegister src);
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // duplicate 2-bytes integer data from src into 16 locations in dest
  void vpbroadcastw(XMMRegister dst, XMMRegister src);

  // duplicate n-bytes integer data from src into vector_len locations in dest
  // xmm/mem sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
@@ -2019,11 +2023,13 @@ private:
  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);

  // scalar single/double precision replicate
  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);

  // gpr sourced byte/word/dword/qword replicate
  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
@@ -47,6 +47,7 @@ define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, LoopPercentProfileLimit, 30);
define_pd_global(intx, PostLoopMultiversioning, true);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, FLOATPRESSURE, 14);
@@ -29,6 +29,5 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"

void pd_ps(frame f) {}
@@ -26,7 +26,6 @@
#define CPU_X86_VM_FRAME_X86_HPP

#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

// A frame represents a physical stack frame (an activation). Frames can be
// C or Java frames, and the Java frames can be interpreted or compiled.
@@ -1216,7 +1216,10 @@ public:
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 1) { // vinserti128 is available only in AVX2
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
@@ -1224,7 +1227,10 @@ public:
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 1) { // vinserti128 is available only in AVX2
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
@@ -1232,7 +1238,10 @@ public:
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 1) { // vextracti128 is available only in AVX2
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
@@ -1240,7 +1249,10 @@ public:
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 1) { // vextracti128 is available only in AVX2
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
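The pattern in these wrappers: prefer the EVEX-encoded AVX-512 32x4 form, then the AVX2 128-bit form, then fall back to the AVX1 float-domain instruction. A compilable sketch of the three-way selection (toy return values; UseAVX mirrors the HotSpot flag, nothing else is HotSpot API):

#include <cstdio>

// Models the vinserti128 wrapper above.
static const char* pick_vinsert(int UseAVX) {
  if (UseAVX > 2) return "vinserti32x4";  // AVX-512
  if (UseAVX > 1) return "vinserti128";   // AVX2
  return "vinsertf128";                   // AVX1 fallback
}

int main() {
  const int levels[] = {1, 2, 3};
  for (int level : levels)
    printf("UseAVX=%d -> %s\n", level, pick_vinsert(level));
  return 0;
}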
@ -1260,37 +1272,57 @@ public:
|
||||
void vextracti128_high(Address dst, XMMRegister src) {
|
||||
vextracti128(dst, src, 1);
|
||||
}
|
||||
|
||||
void vinsertf128_high(XMMRegister dst, XMMRegister src) {
|
||||
vinsertf128(dst, dst, src, 1);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vinsertf32x4(dst, dst, src, 1);
|
||||
} else {
|
||||
Assembler::vinsertf128(dst, dst, src, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void vinsertf128_high(XMMRegister dst, Address src) {
|
||||
vinsertf128(dst, dst, src, 1);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vinsertf32x4(dst, dst, src, 1);
|
||||
} else {
|
||||
Assembler::vinsertf128(dst, dst, src, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void vextractf128_high(XMMRegister dst, XMMRegister src) {
|
||||
vextractf128(dst, src, 1);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vextractf32x4(dst, src, 1);
|
||||
} else {
|
||||
Assembler::vextractf128(dst, src, 1);
|
||||
}
|
||||
}
|
||||
|
||||
void vextractf128_high(Address dst, XMMRegister src) {
|
||||
vextractf128(dst, src, 1);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vextractf32x4(dst, src, 1);
|
||||
} else {
|
||||
Assembler::vextractf128(dst, src, 1);
|
||||
}
|
||||
}
|
||||
|
||||
// 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
|
||||
void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
|
||||
vinserti64x4(dst, dst, src, 1);
|
||||
Assembler::vinserti64x4(dst, dst, src, 1);
|
||||
}
|
||||
void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
|
||||
vinsertf64x4(dst, dst, src, 1);
|
||||
Assembler::vinsertf64x4(dst, dst, src, 1);
|
||||
}
|
||||
void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
|
||||
vextracti64x4(dst, src, 1);
|
||||
Assembler::vextracti64x4(dst, src, 1);
|
||||
}
|
||||
void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
|
||||
vextractf64x4(dst, src, 1);
|
||||
Assembler::vextractf64x4(dst, src, 1);
|
||||
}
|
||||
void vextractf64x4_high(Address dst, XMMRegister src) {
|
||||
vextractf64x4(dst, src, 1);
|
||||
Assembler::vextractf64x4(dst, src, 1);
|
||||
}
|
||||
void vinsertf64x4_high(XMMRegister dst, Address src) {
|
||||
vinsertf64x4(dst, dst, src, 1);
|
||||
Assembler::vinsertf64x4(dst, dst, src, 1);
|
||||
}
|
||||
|
||||
// 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
|
||||
@ -1306,40 +1338,59 @@ public:
|
||||
void vextracti128_low(Address dst, XMMRegister src) {
|
||||
vextracti128(dst, src, 0);
|
||||
}
|
||||
|
||||
void vinsertf128_low(XMMRegister dst, XMMRegister src) {
|
||||
vinsertf128(dst, dst, src, 0);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vinsertf32x4(dst, dst, src, 0);
|
||||
} else {
|
||||
Assembler::vinsertf128(dst, dst, src, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void vinsertf128_low(XMMRegister dst, Address src) {
|
||||
vinsertf128(dst, dst, src, 0);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vinsertf32x4(dst, dst, src, 0);
|
||||
} else {
|
||||
Assembler::vinsertf128(dst, dst, src, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void vextractf128_low(XMMRegister dst, XMMRegister src) {
|
||||
vextractf128(dst, src, 0);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vextractf32x4(dst, src, 0);
|
||||
} else {
|
||||
Assembler::vextractf128(dst, src, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void vextractf128_low(Address dst, XMMRegister src) {
|
||||
vextractf128(dst, src, 0);
|
||||
if (UseAVX > 2) {
|
||||
Assembler::vextractf32x4(dst, src, 0);
|
||||
} else {
|
||||
Assembler::vextractf128(dst, src, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
|
||||
void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
|
||||
vinserti64x4(dst, dst, src, 0);
|
||||
Assembler::vinserti64x4(dst, dst, src, 0);
|
||||
}
|
||||
void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
|
||||
vinsertf64x4(dst, dst, src, 0);
|
||||
Assembler::vinsertf64x4(dst, dst, src, 0);
|
||||
}
|
||||
void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
|
||||
vextracti64x4(dst, src, 0);
|
||||
Assembler::vextracti64x4(dst, src, 0);
|
||||
}
|
||||
void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
|
||||
vextractf64x4(dst, src, 0);
|
||||
Assembler::vextractf64x4(dst, src, 0);
|
||||
}
|
||||
void vextractf64x4_low(Address dst, XMMRegister src) {
|
||||
vextractf64x4(dst, src, 0);
|
||||
Assembler::vextractf64x4(dst, src, 0);
|
||||
}
|
||||
void vinsertf64x4_low(XMMRegister dst, Address src) {
|
||||
vinsertf64x4(dst, dst, src, 0);
|
||||
Assembler::vinsertf64x4(dst, dst, src, 0);
|
||||
}
|
||||
|
||||
|
||||
// Carry-Less Multiplication Quadword
|
||||
void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
|
||||
// 0x00 - multiply lower 64 bits [0:63]
|
||||
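The pattern repeated throughout the hunk above dispatches on the UseAVX feature level: prefer the EVEX-encoded AVX-512 form (vextractf32x4 and friends), fall back to the AVX2-only integer form, then to the AVX1 float-domain form. The following is a minimal standalone sketch of that dispatch, with stand-in emit functions rather than HotSpot's Assembler API; only the branching structure is taken from the diff.

#include <cstdio>

enum AvxLevel { AVX1 = 1, AVX2 = 2, AVX512 = 3 };

// Stand-ins for the assembler's emit routines (illustrative only).
static void emit_vextracti32x4() { std::puts("EVEX vextracti32x4"); }
static void emit_vextracti128()  { std::puts("VEX  vextracti128"); }
static void emit_vextractf128()  { std::puts("VEX  vextractf128"); }

static void extract_high_128(int use_avx) {
  if (use_avx > 2) {
    emit_vextracti32x4();   // AVX-512: EVEX-encoded form preferred
  } else if (use_avx > 1) {
    emit_vextracti128();    // vextracti128 is available only in AVX2
  } else {
    emit_vextractf128();    // AVX1 only has the float-domain form
  }
}

int main() {
  for (int level = AVX1; level <= AVX512; ++level) {
    extract_high_128(level);
  }
  return 0;
}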
@ -29,7 +29,6 @@
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction

@ -355,6 +355,14 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}

size_t SharedRuntime::trampoline_size() {
return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
__ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by

@ -391,6 +391,14 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}

size_t SharedRuntime::trampoline_size() {
return 16;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
__ jump(RuntimeAddress(destination));
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
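Both copies of the hunk above add the same two hooks per platform: trampoline_size() reports a fixed byte budget and generate_trampoline() fills a pre-reserved slot with an unconditional jump to the destination. Below is a minimal sketch of that contract, not the HotSpot implementation; the recorded target stands in for real code emission, and all names are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kTrampolineSize = 16;  // upper bound on the emitted stub

struct Trampoline {
  std::uint8_t code[kTrampolineSize];  // slot reserved by the caller
  std::uintptr_t target;               // stand-in: a real stub encodes a jmp
};

// "Generator": record the destination; real code would emit a jump sequence
// no larger than kTrampolineSize into t->code.
static void generate_trampoline(Trampoline* t, std::uintptr_t destination) {
  t->target = destination;
}

int main() {
  Trampoline t = {};
  generate_trampoline(&t, 0x1000);
  std::printf("trampoline (%zu bytes) -> %#zx\n",
              sizeof t.code, (std::size_t)t.target);
  return 0;
}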
@ -38,7 +38,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

@ -38,7 +38,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

@ -30,7 +30,6 @@
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"

void pd_ps(frame f) {
ShouldNotCallThis();

@ -27,7 +27,6 @@
#define CPU_ZERO_VM_FRAME_ZERO_HPP

#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"

// A frame represents a physical stack frame on the Zero stack.

@ -30,7 +30,6 @@
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction

@ -132,6 +132,15 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return generate_empty_runtime_stub("resolve_blob");
}

size_t SharedRuntime::trampoline_size() {
ShouldNotCallThis();
return 0;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
ShouldNotCallThis();
return;
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,

@ -40,7 +40,6 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "stack_zero.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
@ -55,11 +55,11 @@ public class SAGetopt {
private void extractOptarg(String opt) {
// Argument expected
if (_optind > _argv.length) {
throw new RuntimeException("Not enough arguments for '" + opt + "'");
throw new SAGetoptException("Not enough arguments for '" + opt + "'");
}

if (! _argv[_optind].isEmpty() && _argv[_optind].charAt(0) == '-') {
throw new RuntimeException("Argument is expected for '" + opt + "'");
throw new SAGetoptException("Argument is expected for '" + opt + "'");
}

_optarg = _argv[_optind];
@ -72,7 +72,7 @@ public class SAGetopt {

if (los.contains(ca[0])) {
if (ca.length > 1) {
throw new RuntimeException("Argument is not expected for '" + ca[0] + "'");
throw new SAGetoptException("Argument is not expected for '" + ca[0] + "'");
}
return carg;
}
@ -87,14 +87,14 @@ public class SAGetopt {
try {
extractOptarg(ca[0]);
} catch (ArrayIndexOutOfBoundsException e) {
throw new RuntimeException("Argument is expected for '" + ca[0] + "'");
throw new SAGetoptException("Argument is expected for '" + ca[0] + "'");
}
}

return ca[0];
}

throw new RuntimeException("Invalid option '" + ca[0] + "'");
throw new SAGetoptException("Invalid option '" + ca[0] + "'");
}

public String next(String optStr, String[] longOptStr) {
@ -148,7 +148,7 @@ public class SAGetopt {

int chIndex = optStr.indexOf(ch);
if (chIndex == -1) {
throw new RuntimeException("Invalid option '" + ch + "'");
throw new SAGetoptException("Invalid option '" + ch + "'");
}

if (_optopt >= carg.length()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,31 +22,12 @@
*
*/

#ifndef SHARE_VM_UTILITIES_TOP_HPP
#define SHARE_VM_UTILITIES_TOP_HPP
package sun.jvm.hotspot;

#include "oops/oopsHierarchy.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#include "utilities/sizes.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1_globals.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif
public class SAGetoptException extends IllegalArgumentException {

// THIS FILE IS INTESIONALLY LEFT EMPTY
// IT IS USED TO MINIMIZE THE NUMBER OF DEPENDENCIES IN includeDB
public SAGetoptException(String message) {
super(message);
}

#endif // SHARE_VM_UTILITIES_TOP_HPP
}
@ -111,34 +111,31 @@ public class SALauncher {
return launcherHelp();
}

private static void buildAttachArgs(ArrayList<String> newArgs,
String pid, String exe, String core) {
if ((pid == null) && (exe == null)) {
throw new IllegalArgumentException(
"You have to set --pid or --exe.");
private static void buildAttachArgs(ArrayList<String> newArgs, String pid,
String exe, String core, boolean allowEmpty) {
if (!allowEmpty && (pid == null) && (exe == null)) {
throw new SAGetoptException("You have to set --pid or --exe.");
}

if (pid != null) { // Attach to live process
if (exe != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --exe");
throw new SAGetoptException("Unnecessary argument: --exe");
} else if (core != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --core");
throw new SAGetoptException("Unnecessary argument: --core");
} else if (!pid.matches("^\\d+$")) {
throw new IllegalArgumentException("Invalid pid: " + pid);
throw new SAGetoptException("Invalid pid: " + pid);
}

newArgs.add(pid);
} else {
} else if (exe != null) {
if (exe.length() == 0) {
throw new IllegalArgumentException("You have to set --exe.");
throw new SAGetoptException("You have to set --exe.");
}

newArgs.add(exe);

if ((core == null) || (core.length() == 0)) {
throw new IllegalArgumentException("You have to set --core.");
throw new SAGetoptException("You have to set --core.");
}

newArgs.add(core);
@ -170,7 +167,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, true);
CLHSDB.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -199,7 +196,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, true);
HSDB.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -237,7 +234,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, false);
JStack.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -287,7 +284,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, false);
JMap.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -325,7 +322,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, false);
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -358,7 +355,7 @@ public class SALauncher {
}
}

buildAttachArgs(newArgs, pid, exe, core);
buildAttachArgs(newArgs, pid, exe, core, false);
JSnap.main(newArgs.toArray(new String[newArgs.size()]));
}

@ -416,8 +413,8 @@ public class SALauncher {
return;
}

throw new IllegalArgumentException("Unknown tool: " + args[0]);
} catch (Exception e) {
throw new SAGetoptException("Unknown tool: " + args[0]);
} catch (SAGetoptException e) {
System.err.println(e.getMessage());
toolHelp(args[0]);
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,8 +47,10 @@ public class MethodCounters extends Metadata {
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("MethodCounters");

interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0);
interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0);
if (VM.getVM().isServerCompiler()) {
interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0);
interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0);
}
if (!VM.getVM().isCore()) {
invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0);
backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0);
@ -61,11 +63,19 @@ public class MethodCounters extends Metadata {
private static CIntField backedgeCounter;

public int interpreterInvocationCount() {
return (int) interpreterInvocationCountField.getValue(this);
if (interpreterInvocationCountField != null) {
return (int) interpreterInvocationCountField.getValue(this);
} else {
return 0;
}
}

public int interpreterThrowoutCount() {
return (int) interpreterThrowoutCountField.getValue(this);
if (interpreterThrowoutCountField != null) {
return (int) interpreterThrowoutCountField.getValue(this);
} else {
return 0;
}
}
public long getInvocationCounter() {
if (Assert.ASSERTS_ENABLED) {
@ -23,7 +23,6 @@
package jdk.vm.ci.hotspot;

import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
@ -59,7 +58,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
return true;
}
} else {
throw new JVMCIError("%s", metaspaceObject);
throw new IllegalArgumentException(String.valueOf(metaspaceObject));
}
}
return false;
@ -75,7 +74,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
return prim.asLong();
}
}
throw new JVMCIError("%s", base);
throw new IllegalArgumentException(String.valueOf(base));
}

private static long readRawValue(Constant baseConstant, long displacement, int bits) {
@ -91,7 +90,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
case Long.SIZE:
return UNSAFE.getLong(base, displacement);
default:
throw new JVMCIError("%d", bits);
throw new IllegalArgumentException(String.valueOf(bits));
}
} else {
long pointer = asRawPointer(baseConstant);
@ -105,7 +104,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
case Long.SIZE:
return UNSAFE.getLong(pointer + displacement);
default:
throw new JVMCIError("%d", bits);
throw new IllegalArgumentException(String.valueOf(bits));
}
}
}
@ -178,7 +177,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
case Double:
return JavaConstant.forDouble(Double.longBitsToDouble(rawValue));
default:
throw new JVMCIError("Unsupported kind: %s", kind);
throw new IllegalArgumentException("Unsupported kind: " + kind);
}
} catch (NullPointerException e) {
return null;
@ -35,8 +35,10 @@ public interface MemoryAccessProvider {
* @param displacement the displacement within the object in bytes
* @return the read value encapsulated in a {@link JavaConstant} object, or {@code null} if the
* value cannot be read.
* @throws IllegalArgumentException if {@code kind} is {@link JavaKind#Void} or not
* {@linkplain JavaKind#isPrimitive() primitive} kind
*/
JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant base, long displacement);
JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant base, long displacement) throws IllegalArgumentException;

/**
* Reads a primitive value using a base address and a displacement.
@ -46,8 +48,11 @@ public interface MemoryAccessProvider {
* @param displacement the displacement within the object in bytes
* @param bits the number of bits to read from memory
* @return the read value encapsulated in a {@link JavaConstant} object of {@link JavaKind} kind
* @throws IllegalArgumentException if {@code kind} is {@link JavaKind#Void} or not
* {@linkplain JavaKind#isPrimitive() primitive} kind or {@code bits} is not 8, 16,
* 32 or 64
*/
JavaConstant readPrimitiveConstant(JavaKind kind, Constant base, long displacement, int bits);
JavaConstant readPrimitiveConstant(JavaKind kind, Constant base, long displacement, int bits) throws IllegalArgumentException;

/**
* Reads a Java {@link Object} value using a base address and a displacement.

@ -51,6 +51,8 @@ public interface MethodHandleAccessProvider {
/**
* Returns the method handle method intrinsic identifier for the provided method, or
* {@code null} if the method is not an intrinsic processed by this interface.
*
* @throws NullPointerException if {@code method} is null
*/
IntrinsicMethod lookupMethodHandleIntrinsic(ResolvedJavaMethod method);

@ -58,19 +60,27 @@ public interface MethodHandleAccessProvider {
* Resolves the invocation target for an invocation of {@link IntrinsicMethod#INVOKE_BASIC
* MethodHandle.invokeBasic} with the given constant receiver {@link MethodHandle}. Returns
* {@code null} if the invocation target is not available at this time.
* <p>
*
* The first invocations of a method handle can use an interpreter to lookup the actual invoked
* method; frequently executed method handles can use Java bytecode generation to avoid the
* interpreter overhead. If the parameter forceBytecodeGeneration is set to true, the VM should
* try to generate bytecodes before this method returns.
*
* @returns {@code null} if {@code methodHandle} is not a {@link MethodHandle} or the invocation
* target is not available at this time
* @throws NullPointerException if {@code methodHandle} is null
*/
ResolvedJavaMethod resolveInvokeBasicTarget(JavaConstant methodHandle, boolean forceBytecodeGeneration);

/**
* Resolves the invocation target for an invocation of a {@code MethodHandle.linkTo*} method
* with the given constant member name. The member name is the last parameter of the
* {@code linkTo*} method. Returns {@code null} if the invocation target is not available at
* this time.
* {@code linkTo*} method.
*
* @returns {@code null} if the invocation target is not available at this time
* @throws NullPointerException if {@code memberName} is null
* @throws IllegalArgumentException if {@code memberName} is not a
* {@code java.lang.invoke.MemberName}
*/
ResolvedJavaMethod resolveLinkToTarget(JavaConstant memberName);
}
@ -956,7 +956,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
#ifdef O_NOFOLLOW
RESTARTABLE(::open(filename, oflags), result);
#else
open_o_nofollow(filename, oflags);
result = open_o_nofollow(filename, oflags);
#endif

if (result == OS_ERR) {

@ -3732,6 +3732,28 @@ int os::stat(const char *path, struct stat *sbuf) {
return ::stat(pathbuf, sbuf);
}

static inline struct timespec get_mtime(const char* filename) {
struct stat st;
int ret = os::stat(filename, &st);
assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
#ifdef __APPLE__
return st.st_mtimespec;
#else
return st.st_mtim;
#endif
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
struct timespec filetime1 = get_mtime(file1);
struct timespec filetime2 = get_mtime(file2);
int diff = filetime1.tv_sec - filetime2.tv_sec;
if (diff == 0) {
return filetime1.tv_nsec - filetime2.tv_nsec;
}
return diff;
}

bool os::check_heap(bool force) {
return true;
}
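The BSD variant above differs from the Linux one only in the field name: macOS spells the modification timestamp st_mtimespec, while Linux and the BSDs use st_mtim. Below is a self-contained sketch of the same comparison under a plain POSIX stat(2); note that returning the raw tv_sec difference as an int, as the hunk does, can truncate very large deltas, so this sketch returns a sign instead. The function names mirror the diff but this is not the HotSpot code.

#include <sys/stat.h>
#include <cassert>
#include <cstdio>
#include <ctime>

static timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = ::stat(filename, &st);
  assert(ret == 0 && "stat() failed");
  (void)ret;
#ifdef __APPLE__
  return st.st_mtimespec;  // macOS field name
#else
  return st.st_mtim;       // Linux/BSD field name
#endif
}

// <0 if file1 is older, >0 if newer, 0 if the timestamps match exactly.
static int compare_file_modified_times(const char* file1, const char* file2) {
  timespec t1 = get_mtime(file1);
  timespec t2 = get_mtime(file2);
  if (t1.tv_sec != t2.tv_sec) {
    return (t1.tv_sec < t2.tv_sec) ? -1 : 1;  // avoid int truncation
  }
  return (int)(t1.tv_nsec - t2.tv_nsec);
}

int main(int argc, char** argv) {
  if (argc == 3) {
    std::printf("%d\n", compare_file_modified_times(argv[1], argv[2]));
  }
  return 0;
}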
@ -6008,7 +6008,22 @@ bool os::start_debugging(char *buf, int buflen) {
return yes;
}

static inline struct timespec get_mtime(const char* filename) {
struct stat st;
int ret = os::stat(filename, &st);
assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
return st.st_mtim;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
struct timespec filetime1 = get_mtime(file1);
struct timespec filetime2 = get_mtime(file2);
int diff = filetime1.tv_sec - filetime2.tv_sec;
if (diff == 0) {
return filetime1.tv_nsec - filetime2.tv_nsec;
}
return diff;
}

/////////////// Unit tests ///////////////
@ -181,6 +181,10 @@ int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
return vsnprintf(buf, len, fmt, args);
}

int os::fileno(FILE* fp) {
return ::fileno(fp);
}

void os::Posix::print_load_average(outputStream* st) {
st->print("load average:");
double loadavg[3];

@ -161,6 +161,7 @@ address os::Solaris::handler_end; // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL; // 4352906 workaround

os::Solaris::pthread_setname_np_func_t os::Solaris::_pthread_setname_np = NULL;

// "default" initializers for missing libc APIs
extern "C" {
@ -441,8 +442,15 @@ static bool assign_distribution(processorid_t* id_array,
}

void os::set_native_thread_name(const char *name) {
// Not yet implemented.
return;
if (Solaris::_pthread_setname_np != NULL) {
// Only the first 31 bytes of 'name' are processed by pthread_setname_np
// but we explicitly copy into a size-limited buffer to avoid any
// possible overflow.
char buf[32];
snprintf(buf, sizeof(buf), "%s", name);
buf[sizeof(buf) - 1] = '\0';
Solaris::_pthread_setname_np(pthread_self(), buf);
}
}

bool os::distribute_processes(uint length, uint* distribution) {
@ -1819,6 +1827,19 @@ int os::stat(const char *path, struct stat *sbuf) {
return ::stat(pathbuf, sbuf);
}

static inline time_t get_mtime(const char* filename) {
struct stat st;
int ret = os::stat(filename, &st);
assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
return st.st_mtime;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
time_t t1 = get_mtime(file1);
time_t t2 = get_mtime(file2);
return t1 - t2;
}

static bool _print_ascii_file(const char* filename, outputStream* st) {
int fd = ::open(filename, O_RDONLY);
if (fd == -1) {
@ -4410,6 +4431,13 @@ void os::init(void) {
// the minimum of what the OS supports (thr_min_stack()), and
// enough to allow the thread to get to user bytecode execution.
Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);

// retrieve entry point for pthread_setname_np
void * handle = dlopen("libc.so.1", RTLD_LAZY);
if (handle != NULL) {
Solaris::_pthread_setname_np =
(Solaris::pthread_setname_np_func_t)dlsym(handle, "pthread_setname_np");
}
}

// To install functions for atexit system call
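The Solaris change above resolves pthread_setname_np through dlopen/dlsym at VM startup, so the VM still loads on libc builds where the symbol is missing and the call degrades to a no-op. The following standalone sketch shows that optional-symbol pattern; it assumes the Linux/Solaris two-argument signature (the signature differs on other platforms) and the library name is illustrative. Build with -ldl -lpthread.

#include <dlfcn.h>
#include <pthread.h>
#include <cstdio>

typedef int (*setname_func_t)(pthread_t, const char*);
static setname_func_t g_pthread_setname = nullptr;

// Look the symbol up at runtime instead of linking against it; a missing
// symbol simply leaves the function pointer NULL.
static void init_optional_symbols() {
  void* handle = dlopen(nullptr, RTLD_LAZY);  // main program and its deps
  if (handle != nullptr) {
    g_pthread_setname =
        (setname_func_t)dlsym(handle, "pthread_setname_np");
  }
}

static void set_native_thread_name(const char* name) {
  if (g_pthread_setname != nullptr) {
    char buf[32];  // most libcs cap thread names at 15 or 31 chars
    snprintf(buf, sizeof(buf), "%s", name);
    g_pthread_setname(pthread_self(), buf);
  }  // silently a no-op when the symbol is absent
}

int main() {
  init_optional_symbols();
  set_native_thread_name("demo-thread");
  std::printf("pthread_setname_np %sfound\n",
              g_pthread_setname ? "" : "not ");
  return 0;
}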
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,6 +122,9 @@ class Solaris {
static int _SIGasync; // user-overridable ASYNC_SIGNAL
static void set_SIGasync(int newsig) { _SIGasync = newsig; }

typedef int (*pthread_setname_np_func_t)(pthread_t, const char*);
static pthread_setname_np_func_t _pthread_setname_np;

public:
// Large Page Support--ISM.
static bool largepage_range(char* addr, size_t size);

@ -1594,6 +1594,19 @@ int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
return ret;
}

static inline time_t get_mtime(const char* filename) {
struct stat st;
int ret = os::stat(filename, &st);
assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
return st.st_mtime;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
time_t t1 = get_mtime(file1);
time_t t2 = get_mtime(file2);
return t1 - t2;
}

void os::print_os_info_brief(outputStream* st) {
os::print_os_info(st);
}
@ -3006,9 +3019,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
#ifdef ASSERT
if (should_inject_error) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving pages individually failed.");
}
log_develop_debug(pagesize)("Reserving pages individually failed.");
}
#endif
return NULL;
@ -3192,9 +3203,8 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
// 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
// 2) NUMA Interleaving is enabled, in which case we use a different node for each page
if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually.");
}
log_debug(pagesize)("Reserving large pages individually.");

char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
if (p_buf == NULL) {
// give an appropriate warning message
@ -3211,9 +3221,8 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
return p_buf;

} else {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages in a single large chunk.");
}
log_debug(pagesize)("Reserving large pages in a single large chunk.");

// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
@ -4593,6 +4602,9 @@ int os::ftruncate(int fd, jlong length) {
return 0;
}

int os::fileno(FILE* fp) {
return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -64,6 +65,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#if defined(COMPILER2) || INCLUDE_JVMCI

@ -24,6 +24,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -66,6 +67,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#ifdef COMPILER2

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -64,6 +65,14 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

// we were running Java code when SIGPROF came in
if (isInJava) {
// If we have a last_Java_sp, then the SIGPROF signal caught us

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -65,6 +66,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#if defined(COMPILER2) || INCLUDE_JVMCI

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -77,6 +78,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr,
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

frame ret_frame(ret_sp, frame::unpatchable, addr.pc());

// we were running Java code when SIGPROF came in

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -70,6 +71,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr,
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

// If sp and fp are nonsense just leave them out

if (!jt->on_local_stack((address)ret_sp)) {

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -72,6 +73,14 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}

#if INCLUDE_CDS
if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
// In the middle of a trampoline call. Bail out for safety.
// This happens rarely so shouldn't affect profiling.
return false;
}
#endif

frame ret_frame(ret_sp, ret_fp, addr.pc());
if (!ret_frame.safe_for_sender(jt)) {
#if defined(COMPILER2) || INCLUDE_JVMCI
@ -33,7 +33,6 @@
#include "runtime/vm_version.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"

// This file contains platform-independent assembler declarations.

@ -25,7 +25,8 @@
#ifndef SHARE_VM_ASM_REGISTER_HPP
#define SHARE_VM_ASM_REGISTER_HPP

#include "utilities/top.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Use AbstractRegister as shortcut
class AbstractRegisterImpl;

@ -556,17 +556,16 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
leal(op->in_opr(), op->result_opr());
break;

case lir_null_check:
if (GenerateCompilerNullChecks) {
ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());
case lir_null_check: {
ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

if (op->in_opr()->is_single_cpu()) {
_masm->null_check(op->in_opr()->as_register(), stub->entry());
} else {
Unimplemented();
}
if (op->in_opr()->is_single_cpu()) {
_masm->null_check(op->in_opr()->as_register(), stub->entry());
} else {
Unimplemented();
}
break;
}

case lir_monaddr:
monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());

@ -28,7 +28,6 @@
#include "c1/c1_CodeStubs.hpp"
#include "ci/ciMethodData.hpp"
#include "oops/methodData.hpp"
#include "utilities/top.hpp"

class Compilation;
class ScopeValue;
@ -2041,8 +2041,7 @@ void LIRGenerator::do_Throw(Throw* x) {
// to avoid a fixed interval with an oop during the null check.
// Use a copy of the CodeEmitInfo because debug information is
// different for null_check and throw.
if (GenerateCompilerNullChecks &&
(x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
// if the exception object wasn't created using new then it might be null.
__ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
}

@ -176,7 +176,7 @@
product(bool, InlineSynchronizedMethods, true, \
"Inline synchronized methods") \
\
diagnostic(bool, InlineNIOCheckIndex, true, \
develop(bool, InlineNIOCheckIndex, true, \
"Intrinsify java.nio.Buffer.checkIndex") \
\
develop(bool, CanonicalizeNodes, true, \

@ -29,6 +29,7 @@
#include "memory/allocation.hpp"
#include "prims/jvm.h"
#include "utilities/accessFlags.hpp"
#include "utilities/ostream.hpp"

// ciFlags
//

@ -25,8 +25,9 @@
#ifndef SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP
#define SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP

#include "memory/allocation.hpp"
#include "utilities/bytes.hpp"
#include "utilities/top.hpp"
#include "utilities/exceptions.hpp"

// Input stream for reading .class file
//
@ -2187,43 +2187,19 @@ void java_lang_StackTraceElement::fill_in(Handle element,
}

Method* java_lang_StackFrameInfo::get_method(Handle stackFrame, InstanceKlass* holder, TRAPS) {
if (MemberNameInStackFrame) {
Handle mname(THREAD, stackFrame->obj_field(_memberName_offset));
Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mname());
// we should expand MemberName::name when Throwable uses StackTrace
// MethodHandles::expand_MemberName(mname, MethodHandles::_suppress_defc|MethodHandles::_suppress_type, CHECK_NULL);
return method;
} else {
short mid = stackFrame->short_field(_mid_offset);
short version = stackFrame->short_field(_version_offset);
return holder->method_with_orig_idnum(mid, version);
}
}

Symbol* java_lang_StackFrameInfo::get_file_name(Handle stackFrame, InstanceKlass* holder) {
if (MemberNameInStackFrame) {
return holder->source_file_name();
} else {
short version = stackFrame->short_field(_version_offset);
return Backtrace::get_source_file_name(holder, version);
}
Handle mname(THREAD, stackFrame->obj_field(_memberName_offset));
Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mname());
// we should expand MemberName::name when Throwable uses StackTrace
// MethodHandles::expand_MemberName(mname, MethodHandles::_suppress_defc|MethodHandles::_suppress_type, CHECK_NULL);
return method;
}

void java_lang_StackFrameInfo::set_method_and_bci(Handle stackFrame, const methodHandle& method, int bci) {
// set Method* or mid/cpref
if (MemberNameInStackFrame) {
oop mname = stackFrame->obj_field(_memberName_offset);
InstanceKlass* ik = method->method_holder();
CallInfo info(method(), ik);
MethodHandles::init_method_MemberName(mname, info);
} else {
int mid = method->orig_method_idnum();
int cpref = method->name_index();
assert((jushort)mid == mid, "mid should be short");
assert((jushort)cpref == cpref, "cpref should be short");
java_lang_StackFrameInfo::set_mid(stackFrame(), (short)mid);
java_lang_StackFrameInfo::set_cpref(stackFrame(), (short)cpref);
}
oop mname = stackFrame->obj_field(_memberName_offset);
InstanceKlass* ik = method->method_holder();
CallInfo info(method(), ik);
MethodHandles::init_method_MemberName(mname, info);
// set bci
java_lang_StackFrameInfo::set_bci(stackFrame(), bci);
// method may be redefined; store the version
@ -2232,52 +2208,23 @@ void java_lang_StackFrameInfo::set_method_and_bci(Handle stackFrame, const metho
java_lang_StackFrameInfo::set_version(stackFrame(), (short)version);
}

void java_lang_StackFrameInfo::fill_methodInfo(Handle stackFrame, TRAPS) {
void java_lang_StackFrameInfo::to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS) {
ResourceMark rm(THREAD);
oop k = stackFrame->obj_field(_declaringClass_offset);
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(k));
Handle k (THREAD, stackFrame->obj_field(_declaringClass_offset));
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(k()));
Method* method = java_lang_StackFrameInfo::get_method(stackFrame, holder, CHECK);
int bci = stackFrame->int_field(_bci_offset);

// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : NULL;
if (MemberNameInStackFrame) {
assert(sym != NULL, "MemberName must have method name");
} else {
// The method can be NULL if the requested class version is gone
if (sym == NULL) {
short cpref = stackFrame->short_field(_cpref_offset);
sym = holder->constants()->symbol_at(cpref);
}
}

// set method name
oop methodname = StringTable::intern(sym, CHECK);
java_lang_StackFrameInfo::set_methodName(stackFrame(), methodname);

// set file name and line number
Symbol* source = get_file_name(stackFrame, holder);
if (source != NULL) {
oop filename = StringTable::intern(source, CHECK);
java_lang_StackFrameInfo::set_fileName(stackFrame(), filename);
}

// if the method has been redefined, the bci is no longer applicable
short version = stackFrame->short_field(_version_offset);
if (version_matches(method, version)) {
int line_number = Backtrace::get_line_number(method, bci);
java_lang_StackFrameInfo::set_lineNumber(stackFrame(), line_number);
}
short bci = stackFrame->short_field(_bci_offset);
int cpref = method->name_index();
java_lang_StackTraceElement::fill_in(stack_trace_element, holder, method, version, bci, cpref, CHECK);
}

void java_lang_StackFrameInfo::compute_offsets() {
Klass* k = SystemDictionary::StackFrameInfo_klass();
compute_offset(_declaringClass_offset, k, vmSymbols::declaringClass_name(), vmSymbols::class_signature());
compute_offset(_memberName_offset, k, vmSymbols::memberName_name(), vmSymbols::object_signature());
compute_offset(_bci_offset, k, vmSymbols::bci_name(), vmSymbols::int_signature());
compute_offset(_methodName_offset, k, vmSymbols::methodName_name(), vmSymbols::string_signature());
compute_offset(_fileName_offset, k, vmSymbols::fileName_name(), vmSymbols::string_signature());
compute_offset(_lineNumber_offset, k, vmSymbols::lineNumber_name(), vmSymbols::int_signature());
compute_offset(_bci_offset, k, vmSymbols::bci_name(), vmSymbols::short_signature());
STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}

@ -3690,12 +3637,7 @@ int java_lang_StackTraceElement::moduleVersion_offset;
int java_lang_StackFrameInfo::_declaringClass_offset;
int java_lang_StackFrameInfo::_memberName_offset;
int java_lang_StackFrameInfo::_bci_offset;
int java_lang_StackFrameInfo::_methodName_offset;
int java_lang_StackFrameInfo::_fileName_offset;
int java_lang_StackFrameInfo::_lineNumber_offset;
int java_lang_StackFrameInfo::_mid_offset;
int java_lang_StackFrameInfo::_version_offset;
int java_lang_StackFrameInfo::_cpref_offset;
int java_lang_LiveStackFrameInfo::_monitors_offset;
int java_lang_LiveStackFrameInfo::_locals_offset;
int java_lang_LiveStackFrameInfo::_operands_offset;
@ -3741,34 +3683,14 @@ void java_lang_StackFrameInfo::set_declaringClass(oop element, oop value) {
element->obj_field_put(_declaringClass_offset, value);
}

void java_lang_StackFrameInfo::set_mid(oop element, short value) {
element->short_field_put(_mid_offset, value);
}

void java_lang_StackFrameInfo::set_version(oop element, short value) {
element->short_field_put(_version_offset, value);
}

void java_lang_StackFrameInfo::set_cpref(oop element, short value) {
element->short_field_put(_cpref_offset, value);
}

void java_lang_StackFrameInfo::set_bci(oop element, int value) {
element->int_field_put(_bci_offset, value);
}

void java_lang_StackFrameInfo::set_fileName(oop element, oop value) {
element->obj_field_put(_fileName_offset, value);
}

void java_lang_StackFrameInfo::set_methodName(oop element, oop value) {
element->obj_field_put(_methodName_offset, value);
}

void java_lang_StackFrameInfo::set_lineNumber(oop element, int value) {
element->int_field_put(_lineNumber_offset, value);
}

void java_lang_LiveStackFrameInfo::set_monitors(oop element, oop value) {
element->obj_field_put(_monitors_offset, value);
}

@ -1364,25 +1364,16 @@ class Backtrace: AllStatic {
// Interface to java.lang.StackFrameInfo objects

#define STACKFRAMEINFO_INJECTED_FIELDS(macro) \
macro(java_lang_StackFrameInfo, mid, short_signature, false) \
macro(java_lang_StackFrameInfo, version, short_signature, false) \
macro(java_lang_StackFrameInfo, cpref, short_signature, false)
macro(java_lang_StackFrameInfo, version, short_signature, false)

class java_lang_StackFrameInfo: AllStatic {
private:
static int _declaringClass_offset;
static int _memberName_offset;
static int _bci_offset;
static int _methodName_offset;
static int _fileName_offset;
static int _lineNumber_offset;

static int _mid_offset;
static int _version_offset;
static int _cpref_offset;

static Method* get_method(Handle stackFrame, InstanceKlass* holder, TRAPS);
static Symbol* get_file_name(Handle stackFrame, InstanceKlass* holder);

public:
// Setters
@ -1390,19 +1381,12 @@ public:
static void set_method_and_bci(Handle stackFrame, const methodHandle& method, int bci);
static void set_bci(oop info, int value);

// set method info in an instance of StackFrameInfo
static void fill_methodInfo(Handle info, TRAPS);
static void set_methodName(oop info, oop value);
static void set_fileName(oop info, oop value);
static void set_lineNumber(oop info, int value);

// these injected fields are only used if -XX:-MemberNameInStackFrame set
static void set_mid(oop info, short value);
static void set_version(oop info, short value);
static void set_cpref(oop info, short value);

static void compute_offsets();

static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS);

// Debugging
friend class JavaClasses;
};

@ -222,20 +222,17 @@ inline int Backtrace::get_line_number(const methodHandle& method, int bci) {
return line_number;
}

/*
* Returns the source file name of a given InstanceKlass and version
*/
inline Symbol* Backtrace::get_source_file_name(InstanceKlass* holder, int version) {
// Find the specific ik version that contains this source_file_name_index
// via the previous versions list, but use the current version's
// constant pool to look it up. The previous version's index has been
// merged for the current constant pool.
InstanceKlass* ik = holder->get_klass_version(version);
// This version has been cleaned up.
if (ik == NULL) return NULL;
int source_file_name_index = ik->source_file_name_index();
return (source_file_name_index == 0) ?
(Symbol*)NULL : holder->constants()->symbol_at(source_file_name_index);
// RedefineClasses() currently permits redefine operations to
// happen in parallel using a "last one wins" philosophy. That
// spec laxness allows the constant pool entry associated with
// the source_file_name_index for any older constant pool version
// to be unstable so we shouldn't try to use it.
if (holder->constants()->version() != version) {
return NULL;
} else {
return holder->source_file_name();
}
}

#endif // SHARE_VM_CLASSFILE_JAVACLASSES_INLINE_HPP
@ -2067,7 +2067,18 @@ bool SystemDictionary::initialize_wk_klass(WKID id, int init_opt, TRAPS) {
int sid = (info >> CEIL_LG_OPTION_LIMIT);
Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid);
InstanceKlass** klassp = &_well_known_klasses[id];
bool must_load = (init_opt < SystemDictionary::Opt);

bool must_load;
#if INCLUDE_JVMCI
if (EnableJVMCI) {
// If JVMCI is enabled we require its classes to be found.
must_load = (init_opt < SystemDictionary::Opt) || (init_opt == SystemDictionary::Jvmci);
} else
#endif
{
must_load = (init_opt < SystemDictionary::Opt);
}

if ((*klassp) == NULL) {
Klass* k;
if (must_load) {
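The hunk above layers a runtime flag check inside a compile-time feature guard: when the build includes JVMCI and EnableJVMCI is set, the JVMCI well-known classes become mandatory; otherwise the default rule applies. A compilable standalone sketch of that gating shape, with the macro, flag, and option levels as illustrative stand-ins:

#include <cstdio>

#define INCLUDE_JVMCI 1          // build-time feature switch (illustrative)
static bool EnableJVMCI = true;  // runtime flag (illustrative)

enum InitOption { Pre = 0, Opt = 1, Jvmci = 2 };

static bool must_load_class(InitOption init_opt) {
  bool must_load;
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // With JVMCI active, its well-known classes are mandatory too.
    must_load = (init_opt < Opt) || (init_opt == Jvmci);
  } else
#endif
  {
    must_load = (init_opt < Opt);  // default rule when JVMCI is off or absent
  }
  return must_load;
}

int main() {
  std::printf("Jvmci class mandatory: %s\n",
              must_load_class(Jvmci) ? "yes" : "no");
  return 0;
}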
@ -241,7 +241,7 @@ class SystemDictionary : AllStatic {

Opt, // preload tried; NULL if not present
#if INCLUDE_JVMCI
Jvmci, // preload tried; error if not present, use only with JVMCI
Jvmci, // preload tried; error if not present if JVMCI enabled
#endif
OPTION_LIMIT,
CEIL_LG_OPTION_LIMIT = 2 // OPTION_LIMIT <= (1<<CEIL_LG_OPTION_LIMIT)
@ -1042,6 +1042,14 @@ void CodeCache::clear_inline_caches() {
}
}

void CodeCache::cleanup_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
while(iter.next_alive()) {
iter.method()->cleanup_inline_caches(/*clean_all=*/true);
}
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

@ -201,6 +201,7 @@ class CodeCache : AllStatic {
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static void cleanup_inline_caches();

// Returns true if an own CodeHeap for the given CodeBlobType is available
static bool heap_available(int code_blob_type);

@ -1139,8 +1139,7 @@ void nmethod::clear_ic_stubs() {
}
}

void nmethod::cleanup_inline_caches() {
void nmethod::cleanup_inline_caches(bool clean_all/*=false*/) {
assert_locked_or_safepoint(CompiledIC_lock);

// If the method is not entrant or zombie then a JMP is plastered over the
@ -1170,7 +1169,7 @@ void nmethod::cleanup_inline_caches() {
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
}
break;
}
@ -1180,7 +1179,7 @@ void nmethod::cleanup_inline_caches() {
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
}
break;
}
@ -599,7 +599,7 @@ public:
// Inline cache support
void clear_inline_caches();
void clear_ic_stubs();
void cleanup_inline_caches();
void cleanup_inline_caches(bool clean_all = false);
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}

@ -26,8 +26,9 @@
#define SHARE_VM_CODE_RELOCINFO_HPP

#include "memory/allocation.hpp"
#include "utilities/top.hpp"
#include "runtime/os.hpp"

class Metadata;
class NativeMovConstReg;

// Types in this file:

@ -28,10 +28,9 @@
#include "asm/register.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"

#include "utilities/ostream.hpp"
#ifdef COMPILER2
#include "opto/adlcVMDeps.hpp"
#include "utilities/ostream.hpp"
#endif

//------------------------------VMReg------------------------------------------
@ -389,13 +389,16 @@ CompileTask* CompileQueue::get() {
task = CompilationPolicy::policy()->select_task(this);
}

// Save method pointers across unlock safepoint. The task is removed from
// the compilation queue, which is walked during RedefineClasses.
save_method = methodHandle(task->method());
save_hot_method = methodHandle(task->hot_method());
if (task != NULL) {
// Save method pointers across unlock safepoint. The task is removed from
// the compilation queue, which is walked during RedefineClasses.
save_method = methodHandle(task->method());
save_hot_method = methodHandle(task->hot_method());

remove(task);
purge_stale_tasks(); // may temporarily release MCQ lock
}

remove(task);
purge_stale_tasks(); // may temporarily release MCQ lock
return task;
}
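The fix above guards against the selection policy returning no task: everything that dereferences the task pointer moves inside a NULL check, while the NULL return still flows back to the caller. A minimal illustration of the hazard and the fix, with all names as stand-ins for the HotSpot types:

#include <cstdio>

struct Task { const char* method; };

static Task* select_task(bool queue_empty) {
  static Task t = {"Foo::bar"};
  return queue_empty ? nullptr : &t;  // policy may legitimately yield NULL
}

static Task* get_task(bool queue_empty) {
  Task* task = select_task(queue_empty);
  if (task != nullptr) {              // before the fix, the dereference ran
    std::printf("dequeued %s\n", task->method);  // unconditionally here
  }
  return task;                        // caller already handles NULL
}

int main() {
  get_task(false);
  get_task(true);  // safe: no dereference of a NULL task
  return 0;
}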
@ -1784,7 +1787,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
bool is_osr = (osr_bci != standard_entry_bci);
bool should_log = (thread->log() != NULL);
bool should_break = false;
int task_level = task->comp_level();
const int task_level = task->comp_level();
AbstractCompiler* comp = task->compiler();

DirectiveSet* directive;
{
@ -1796,7 +1800,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
assert(!method->is_native(), "no longer compile natives");

// Look up matching directives
directive = DirectivesStack::getMatchingDirective(method, compiler(task_level));
directive = DirectivesStack::getMatchingDirective(method, comp);

// Save information about this method in case of failure.
set_last_compile(thread, method, is_osr, task_level);
@ -1815,13 +1819,13 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
int compilable = ciEnv::MethodCompilable;
const char* failure_reason = NULL;
const char* retry_message = NULL;
AbstractCompiler *comp = compiler(task_level);

int system_dictionary_modification_counter;
{
MutexLocker locker(Compile_lock, thread);
system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
}

#if INCLUDE_JVMCI
if (UseJVMCICompiler && comp != NULL && comp->is_jvmci()) {
JVMCICompiler* jvmci = (JVMCICompiler*) comp;

@ -123,6 +123,13 @@ void CompileTask::initialize(int compile_id,
_next = NULL;
}

/**
 * Returns the compiler for this task.
 */
AbstractCompiler* CompileTask::compiler() {
return CompileBroker::compiler(_comp_level);
}

// ------------------------------------------------------------------
// CompileTask::code/set_code
//
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -115,6 +115,8 @@ class CompileTask : public CHeapObj<mtCompiler> {
int comp_level() { return _comp_level;}
void set_comp_level(int comp_level) { _comp_level = comp_level;}

AbstractCompiler* compiler();

int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }
void set_num_inlined_bytecodes(int n) { _num_inlined_bytecodes = n; }
@ -32,6 +32,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/logStream.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
@ -505,10 +506,13 @@ void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const
    return;
  }
  log.debug("%s", title);
  _dictionary->report_statistics(log.debug_stream());

  LogStream out(log.debug());
  _dictionary->report_statistics(&out);

  if (log.is_trace()) {
    ResourceMark rm;
    reportIndexedFreeListStatistics(log.trace_stream());
    LogStream trace_out(log.trace());
    reportIndexedFreeListStatistics(&trace_out);
    size_t total_size = totalSizeInIndexedFreeLists() +
                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());

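The recurring refactor in these GC hunks replaces the implicit log.debug_stream() / log.trace_stream() accessors with a LogStream constructed explicitly on the stack and passed by address. A minimal sketch of the before/after idiom, assuming a log handle declared as in this file:

  // Old idiom: hand out a lazily created stream owned by the log site.
  //   _dictionary->report_statistics(log.debug_stream());
  // New idiom: build the stream explicitly and pass its address.
  LogStream out(log.debug());
  _dictionary->report_statistics(&out);

Making the stream's lifetime visible at the call site is presumably the point: the stream now lives exactly as long as the enclosing scope, instead of being owned behind the log handle.
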
@ -82,6 +82,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // "Size" of chunks of work (executed during parallel remark phases

@ -425,7 +425,7 @@ void CMSStats::print_on(outputStream *st) const {
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
  st->cr();
}
#endif // #ifndef PRODUCT

@ -1108,8 +1108,10 @@ bool ConcurrentMarkSweepGeneration::should_collect(bool full,
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
    log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
    log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

@ -1117,21 +1119,22 @@ bool CMSCollector::shouldConcurrentCollect() {
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  Log(gc) log;
  if (log.is_trace() && stats().valid()) {
    log.trace("CMSCollector shouldConcurrentCollect: ");
    ResourceMark rm;
    stats().print_on(log.debug_stream());
    log.trace("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.trace("free=" SIZE_FORMAT, _cmsGen->free());
    log.trace("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.trace("promotion_rate=%g", stats().promotion_rate());
    log.trace("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.trace("occupancy=%3.7f", _cmsGen->occupancy());
    log.trace("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.trace("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.trace("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.trace("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

@ -1149,8 +1152,8 @@ bool CMSCollector::shouldConcurrentCollect() {
    // this branch will not fire after the first successful CMS
    // collection because the stats should then be valid.
    if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
      log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                    _cmsGen->occupancy(), _bootstrap_occupancy);
      log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                _cmsGen->occupancy(), _bootstrap_occupancy);
      return true;
    }
  }
@ -1162,7 +1165,7 @@ bool CMSCollector::shouldConcurrentCollect() {
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    log_trace(gc)("CMS old gen initiated");
    log.print("CMS old gen initiated");
    return true;
  }

@ -1173,12 +1176,12 @@ bool CMSCollector::shouldConcurrentCollect() {
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    log_trace(gc)("CMSCollector: collect because incremental collection will fail ");
    log.print("CMSCollector: collect because incremental collection will fail ");
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    log_trace(gc)("CMSCollector: collect for metadata allocation ");
    log.print("CMSCollector: collect for metadata allocation ");
    return true;
  }

@ -1193,10 +1196,10 @@ bool CMSCollector::shouldConcurrentCollect() {
  // as we want to be able to trigger the first CMS cycle as well)
  if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
    if (stats().valid()) {
      log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                    stats().cms_time_since_begin());
      log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                stats().cms_time_since_begin());
    } else {
      log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)");
      log.print("CMSCollector: collect because of trigger interval (first collection)");
    }
    return true;
  }

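The shape of this refactor: instead of invoking the log_trace(gc)(...) macro at every exit point, the function declares one LogTarget(Trace, gc) handle up front, guards the expensive dump with is_enabled(), and routes all output through log.print(...). A condensed sketch of the pattern, with CMS names taken from the hunks (not verbatim source):

  bool CMSCollector::shouldConcurrentCollect() {
    LogTarget(Trace, gc) log;            // one target for the whole function

    if (_full_gc_requested) {
      log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
      return true;
    }

    if (log.is_enabled() && stats().valid()) {
      LogStream out(log);                // stream view for print_on()-style dumps
      stats().print_on(&out);
      log.print("occupancy=%3.7f", _cmsGen->occupancy());
    }
    // ... each remaining trigger calls log.print(...) before returning true ...
    return false;
  }

One practical gain: the target is resolved once, so every message in the function is guaranteed to go to the same level and tag set.
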
@ -206,8 +206,14 @@ public:
      return 0;
    }
    if (hr->is_humongous()) {
      mark_card_bitmap_range(start, hr->top());
      return pointer_delta(hr->top(), start, 1);
      HeapRegion* start_region = hr->humongous_start_region();
      if (mark_bitmap->isMarked(start_region->bottom())) {
        mark_card_bitmap_range(start, hr->top());
        return pointer_delta(hr->top(), start, 1);
      } else {
        // Humongous start object was actually dead.
        return 0;
      }
    }

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),

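The fix hinges on where G1 records liveness for humongous objects: the mark bit lives at the bottom of the humongous *start* region, so a continuation region cannot be judged by its own bits. Reduced to its essence (a sketch, not verbatim source):

    // Sketch: liveness of any region in a humongous sequence is decided by
    // the mark on the first word of the start region of that sequence.
    HeapRegion* start_region = hr->humongous_start_region();
    if (!mark_bitmap->isMarked(start_region->bottom())) {
      return 0;  // humongous object is dead; nothing to mark or count
    }
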
@ -1829,10 +1829,14 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
                                               HeapRegion::GrainBytes,
                                               translation_factor,
                                               mtGC);
  if (TracePageSizes) {
    tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
                  description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
  }

  os::trace_page_sizes_for_requested_size(description,
                                          size,
                                          preferred_page_size,
                                          rs.alignment(),
                                          rs.base(),
                                          rs.size());

  return result;
}

@ -1906,26 +1910,28 @@ jint G1CollectedHeap::initialize() {
                                         HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  os::trace_page_sizes("G1 Heap", collector_policy()->min_heap_byte_size(),
                       max_byte_size, page_size,
  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       max_byte_size,
                       page_size,
                       heap_rs.base(),
                       heap_rs.size());
  heap_storage->set_mapping_changed_listener(&_listener);

  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
  G1RegionToSpaceMapper* bot_storage =
    create_aux_memory_mapper("Block offset table",
    create_aux_memory_mapper("Block Offset Table",
                             G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
                             G1BlockOffsetTable::heap_map_factor());

  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
  G1RegionToSpaceMapper* cardtable_storage =
    create_aux_memory_mapper("Card table",
    create_aux_memory_mapper("Card Table",
                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
                             G1SATBCardTableLoggingModRefBS::heap_map_factor());

  G1RegionToSpaceMapper* card_counts_storage =
    create_aux_memory_mapper("Card counts table",
    create_aux_memory_mapper("Card Counts Table",
                             G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
                             G1CardCounts::heap_map_factor());

@ -2736,7 +2742,7 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  _cmThread->print_on(st);
  st->cr();
  _cm->print_worker_threads_on(st);
  _cg1r->print_worker_threads_on(st);
  _cg1r->print_worker_threads_on(st); // also prints the sample thread
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::print_worker_threads_on(st);
  }
@ -2745,7 +2751,8 @@ void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  workers()->threads_do(tc);
  tc->do_thread(_cmThread);
  _cg1r->threads_do(tc);
  _cm->threads_do(tc);
  _cg1r->threads_do(tc); // also iterates over the sample thread
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }

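With this change, gc_threads_do() also visits the concurrent-mark worker threads via the new G1ConcurrentMark::threads_do() (added in a hunk further down). As an illustration of the ThreadClosure protocol this relies on, here is a hypothetical closure that merely counts the GC threads it is handed; the CountingClosure name is invented for this sketch:

  // Hypothetical example: count every GC thread the heap reports.
  class CountingClosure : public ThreadClosure {
   public:
    int _count;
    CountingClosure() : _count(0) {}
    virtual void do_thread(Thread* thread) {
      _count++;  // called once per worker, concurrent-mark, refinement thread, etc.
    }
  };

  // Usage sketch:
  //   CountingClosure cl;
  //   g1h->gc_threads_do(&cl);  // the count now includes _cm's workers too
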
@ -2940,13 +2947,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
           : rset->is_empty();
  }

  bool is_typeArray_region(HeapRegion* region) const {
    return oop(region->bottom())->is_typeArray();
  }

  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->is_starts_humongous(), "Must start a humongous object");

    oop obj = oop(region->bottom());

    // Dead objects cannot be eager reclaim candidates. Due to class
    // unloading it is unsafe to query their classes so we return early.
    if (heap->is_obj_dead(obj, region)) {
      return false;
    }

    // Candidate selection must satisfy the following constraints
    // while concurrent marking is in progress:
    //
@ -2983,7 +2994,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
    // important use case for eager reclaim, and this special handling
    // may reduce needed headroom.

    return is_typeArray_region(region) && is_remset_small(region);
    return obj->is_typeArray() && is_remset_small(region);
  }

 public:

@ -4441,7 +4452,6 @@ void G1CollectedHeap::process_weak_jni_handles() {
}

void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
  double preserve_cm_referents_start = os::elapsedTime();
  // Any reference objects, in the collection set, that were 'discovered'
  // by the CM ref processor should have already been copied (either by
  // applying the external root copy closure to the discovered lists, or
@ -4462,16 +4472,24 @@ void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_
  // objects discovered by the STW ref processor in case one of these
  // referents points to another object which is also referenced by an
  // object discovered by the STW ref processor.
  double preserve_cm_referents_time = 0.0;

  uint no_of_gc_workers = workers()->active_workers();
  // To avoid spawning task when there is no work to do, check that
  // a concurrent cycle is active and that some references have been
  // discovered.
  if (concurrent_mark()->cmThread()->during_cycle() &&
      ref_processor_cm()->has_discovered_references()) {
    double preserve_cm_referents_start = os::elapsedTime();
    uint no_of_gc_workers = workers()->active_workers();
    G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                   per_thread_states,
                                                   no_of_gc_workers,
                                                   _task_queues);
    workers()->run_task(&keep_cm_referents);
    preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
  }

  G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                 per_thread_states,
                                                 no_of_gc_workers,
                                                 _task_queues);
  workers()->run_task(&keep_cm_referents);

  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms((os::elapsedTime() - preserve_cm_referents_start) * 1000.0);
  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
}

// Weak Reference processing during an evacuation pause (part 1).
@ -4818,6 +4836,9 @@ void G1CollectedHeap::cleanUpCardTable() {

  workers()->run_task(&cleanup_task);
#ifndef PRODUCT
  // Need to synchronize with concurrent cleanup since it needs to
  // finish its card table clearing before we can verify.
  wait_while_free_regions_coming();
  _verifier->verify_card_table_cleanup();
#endif
}

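The preserve_cm_referents() change is a guard-then-time pattern: skip spawning the parallel task entirely when no concurrent cycle is running or nothing has been discovered, and accumulate elapsed time only when the task actually ran. Reduced to its skeleton, with names from the hunk (task construction elided; not verbatim source):

  double preserve_cm_referents_time = 0.0;  // reported even when skipped

  if (concurrent_mark()->cmThread()->during_cycle() &&
      ref_processor_cm()->has_discovered_references()) {
    double start = os::elapsedTime();
    workers()->run_task(&keep_cm_referents);
    preserve_cm_referents_time = os::elapsedTime() - start;
  }

  // Phase times always receive a value; 0.0 encodes "nothing to do".
  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
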
@ -2112,6 +2112,10 @@ void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  _parallel_workers->print_worker_threads_on(st);
}

void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
  _parallel_workers->threads_do(tc);
}

void G1ConcurrentMark::print_on_error(outputStream* st) const {
  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
               p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));

@ -621,6 +621,7 @@ public:
  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;

@ -25,12 +25,13 @@
#include "precompiled.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
      log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
@ -40,9 +41,9 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
      log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
                            "A new max generation size of " SIZE_FORMAT "k will be used.",
                            NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

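These hunks migrate ad-hoc warning(...) calls to unified logging, which makes the messages routable and filterable by tag. A minimal sketch of the substitution (assuming the usual JDK 9 unified-logging behavior):

  // Old: always printed through the raw warning() facility.
  //   warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
  // New: tagged, level-aware unified logging on the gc+ergo tag set.
  log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");

Such messages can then be selected on the command line with standard unified-logging syntax, e.g. -Xlog:gc+ergo=warning (assumed here, per the usual UL conventions).
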
@ -26,18 +26,6 @@
#include "gc/parallel/generationSizer.hpp"
#include "gc/shared/collectorPolicy.hpp"

void GenerationSizer::trace_gen_sizes(const char* const str) {
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str,
                  _min_old_size / K, _max_old_size / K,
                  _min_young_size / K, _max_young_size / K,
                  _max_heap_byte_size / K);
  }
}

void GenerationSizer::initialize_alignments() {
  _space_alignment = _gen_alignment = default_gen_alignment();
  _heap_alignment = compute_heap_alignment();
@ -60,7 +48,6 @@ void GenerationSizer::initialize_flags() {
}

void GenerationSizer::initialize_size_info() {
  trace_gen_sizes("ps heap raw");
  const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
  const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
  const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
@ -76,6 +63,4 @@ void GenerationSizer::initialize_size_info() {
    initialize_flags();
  }
  GenCollectorPolicy::initialize_size_info();

  trace_gen_sizes("ps heap rnd");
}
Some files were not shown because too many files have changed in this diff.