This commit is contained in:
J. Duke 2017-07-05 21:56:30 +02:00
commit cfa86260aa
840 changed files with 82967 additions and 46268 deletions

View File

@ -368,3 +368,4 @@ cae471d3b87783e0a3deea658e1e1c84b2485b6c jdk-9+121
405d811c0d7b9b48ff718ae6c240b732f098c028 jdk-9+123
f80c841ae2545eaf9acd2724bccc305d98cefbe2 jdk-9+124
9aa7d40f3a453f51e47f4c1b19eff5740a74a9f8 jdk-9+125
3a58466296d36944454756ef01e7513ac5e14a16 jdk-9+126

View File

@ -976,7 +976,6 @@ JVM_VARIANTS
DEBUG_LEVEL
HOTSPOT_DEBUG_LEVEL
JDK_VARIANT
SET_OPENJDK
USERNAME
CANONICAL_TOPDIR
ORIGINAL_TOPDIR
@ -5095,7 +5094,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1467223237
DATE_WHEN_GENERATED=1467960715
###############################################################################
#
@ -16592,41 +16591,17 @@ else
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for presence of closed sources" >&5
$as_echo_n "checking for presence of closed sources... " >&6; }
if test -d "$SRC_ROOT/jdk/src/closed"; then
CLOSED_SOURCE_PRESENT=yes
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if custom source is suppressed (openjdk-only)" >&5
$as_echo_n "checking if custom source is suppressed (openjdk-only)... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_openjdk_only" >&5
$as_echo "$enable_openjdk_only" >&6; }
if test "x$enable_openjdk_only" = "xyes"; then
SUPPRESS_CUSTOM_EXTENSIONS="true"
elif test "x$enable_openjdk_only" = "xno"; then
SUPPRESS_CUSTOM_EXTENSIONS="false"
else
CLOSED_SOURCE_PRESENT=no
as_fn_error $? "Invalid value for --enable-openjdk-only: $enable_openjdk_only" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CLOSED_SOURCE_PRESENT" >&5
$as_echo "$CLOSED_SOURCE_PRESENT" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if closed source is suppressed (openjdk-only)" >&5
$as_echo_n "checking if closed source is suppressed (openjdk-only)... " >&6; }
SUPPRESS_CLOSED_SOURCE="$enable_openjdk_only"
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $SUPPRESS_CLOSED_SOURCE" >&5
$as_echo "$SUPPRESS_CLOSED_SOURCE" >&6; }
if test "x$CLOSED_SOURCE_PRESENT" = xno; then
OPENJDK=true
if test "x$SUPPRESS_CLOSED_SOURCE" = "xyes"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No closed source present, --enable-openjdk-only makes no sense" >&5
$as_echo "$as_me: WARNING: No closed source present, --enable-openjdk-only makes no sense" >&2;}
fi
else
if test "x$SUPPRESS_CLOSED_SOURCE" = "xyes"; then
OPENJDK=true
else
OPENJDK=false
fi
fi
if test "x$OPENJDK" = "xtrue"; then
SET_OPENJDK="OPENJDK=true"
fi
# custom-make-dir is deprecated. Please use your custom-hook.m4 to override
# the IncludeCustomExtension macro.
@ -53313,11 +53288,7 @@ $as_echo "yes, forced" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, forced" >&5
$as_echo "no, forced" >&6; }
elif test "x$enable_dtrace" = "xauto" || test "x$enable_dtrace" = "x"; then
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK" != "xtrue"; then
INCLUDE_DTRACE=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, non-open linux build" >&5
$as_echo "no, non-open linux build" >&6; }
elif test "x$DTRACE_DEP_MISSING" = "xtrue"; then
if test "x$DTRACE_DEP_MISSING" = "xtrue"; then
INCLUDE_DTRACE=false
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, missing dependencies" >&5
$as_echo "no, missing dependencies" >&6; }
@ -53596,11 +53567,9 @@ $as_echo "yes" >&6; }
NEEDS_LIB_CUPS=true
fi
# Check if freetype is needed
if test "x$OPENJDK" = "xtrue"; then
# A custom hook may have set this already
if test "x$NEEDS_LIB_FREETYPE" = "x"; then
NEEDS_LIB_FREETYPE=true
else
NEEDS_LIB_FREETYPE=false
fi
# Check if alsa is needed

View File

@ -161,10 +161,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_DTRACE],
INCLUDE_DTRACE=false
AC_MSG_RESULT([no, forced])
elif test "x$enable_dtrace" = "xauto" || test "x$enable_dtrace" = "x"; then
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK" != "xtrue"; then
INCLUDE_DTRACE=false
AC_MSG_RESULT([no, non-open linux build])
elif test "x$DTRACE_DEP_MISSING" = "xtrue"; then
if test "x$DTRACE_DEP_MISSING" = "xtrue"; then
INCLUDE_DTRACE=false
AC_MSG_RESULT([no, missing dependencies])
else

View File

@ -117,36 +117,15 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_OPEN_OR_CUSTOM],
AC_ARG_ENABLE([openjdk-only], [AS_HELP_STRING([--enable-openjdk-only],
[suppress building custom source even if present @<:@disabled@:>@])],,[enable_openjdk_only="no"])
AC_MSG_CHECKING([for presence of closed sources])
if test -d "$SRC_ROOT/jdk/src/closed"; then
CLOSED_SOURCE_PRESENT=yes
AC_MSG_CHECKING([if custom source is suppressed (openjdk-only)])
AC_MSG_RESULT([$enable_openjdk_only])
if test "x$enable_openjdk_only" = "xyes"; then
SUPPRESS_CUSTOM_EXTENSIONS="true"
elif test "x$enable_openjdk_only" = "xno"; then
SUPPRESS_CUSTOM_EXTENSIONS="false"
else
CLOSED_SOURCE_PRESENT=no
AC_MSG_ERROR([Invalid value for --enable-openjdk-only: $enable_openjdk_only])
fi
AC_MSG_RESULT([$CLOSED_SOURCE_PRESENT])
AC_MSG_CHECKING([if closed source is suppressed (openjdk-only)])
SUPPRESS_CLOSED_SOURCE="$enable_openjdk_only"
AC_MSG_RESULT([$SUPPRESS_CLOSED_SOURCE])
if test "x$CLOSED_SOURCE_PRESENT" = xno; then
OPENJDK=true
if test "x$SUPPRESS_CLOSED_SOURCE" = "xyes"; then
AC_MSG_WARN([No closed source present, --enable-openjdk-only makes no sense])
fi
else
if test "x$SUPPRESS_CLOSED_SOURCE" = "xyes"; then
OPENJDK=true
else
OPENJDK=false
fi
fi
if test "x$OPENJDK" = "xtrue"; then
SET_OPENJDK="OPENJDK=true"
fi
AC_SUBST(SET_OPENJDK)
# custom-make-dir is deprecated. Please use your custom-hook.m4 to override
# the IncludeCustomExtension macro.

View File

@ -59,11 +59,9 @@ AC_DEFUN_ONCE([LIB_DETERMINE_DEPENDENCIES],
NEEDS_LIB_CUPS=true
fi
# Check if freetype is needed
if test "x$OPENJDK" = "xtrue"; then
# A custom hook may have set this already
if test "x$NEEDS_LIB_FREETYPE" = "x"; then
NEEDS_LIB_FREETYPE=true
else
NEEDS_LIB_FREETYPE=false
fi
# Check if alsa is needed

View File

@ -108,7 +108,6 @@ OPENJDK_BUILD_CPU_ENDIAN:=@OPENJDK_BUILD_CPU_ENDIAN@
REQUIRED_OS_NAME:=@REQUIRED_OS_NAME@
REQUIRED_OS_VERSION:=@REQUIRED_OS_VERSION@
@SET_OPENJDK@
LIBM:=@LIBM@
LIBDL:=@LIBDL@

File diff suppressed because it is too large

View File

@ -6,7 +6,7 @@
<name>OpenJDK (Native)</name>
<c-extensions>c,m</c-extensions>
<cpp-extensions>cc,cpp</cpp-extensions>
<header-extensions>ad,h,hpp,in_out</header-extensions>
<header-extensions>ad,h,hh,hpp,in_out,map,txt</header-extensions>
<sourceEncoding>UTF-8</sourceEncoding>
<make-dep-projects/>
<sourceRootList>
@ -26,6 +26,9 @@
<type>0</type>
</confElem>
</confList>
<formatting>
<project-formatting-style>false</project-formatting-style>
</formatting>
</data>
</configuration>
</project>

View File

@ -528,3 +528,4 @@ af6b4ad908e732d23021f12e8322b204433d5cf6 jdk-9+122
75f81e1fecfb444f34f357295fe06af60e2762d9 jdk-9+123
479631362b4930be985245ea063d87d821a472eb jdk-9+124
bb640b49741af3f57f9994129934c46fc173219f jdk-9+125
adc8c84b7cf8c540d920182f78a2bc982366432a jdk-9+126

View File

@ -186,7 +186,8 @@ To build hotspot and import it into the JDK: "mx make hotspot import-hotspot"
# Might be building with JDK8 which has cacerts under jre/
srcCerts = join(mx.get_jdk(tag='default').home, 'jre', 'lib', 'security', 'cacerts')
dstCerts = join(jdkImageDir, 'lib', 'security', 'cacerts')
shutil.copyfile(srcCerts, dstCerts)
if srcCerts != dstCerts:
shutil.copyfile(srcCerts, dstCerts)
_create_jdk_bundle(jdkBuildDir, _vm.debugLevel, jdkImageDir)

View File

@ -149,7 +149,6 @@ suite = {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.hotspotvmconfig",
"jdk.vm.ci.common",
"jdk.vm.ci.runtime",
"jdk.vm.ci.services",
@ -175,14 +174,6 @@ suite = {
"workingSets" : "API,JVMCI",
},
"jdk.vm.ci.hotspotvmconfig" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"workingSets" : "JVMCI,HotSpot",
},
"jdk.vm.ci.hotspot.aarch64" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
@ -248,13 +239,6 @@ suite = {
],
},
"JVMCI_HOTSPOTVMCONFIG" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : [
"jdk.vm.ci.hotspotvmconfig",
],
},
"JVMCI_HOTSPOT" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : [
@ -263,7 +247,6 @@ suite = {
"jdk.vm.ci.hotspot.sparc",
],
"distDependencies" : [
"JVMCI_HOTSPOTVMCONFIG",
"JVMCI_SERVICES",
"JVMCI_API",
],

View File

@ -23,7 +23,10 @@
# questions.
#
GTEST_TEST_SRC := $(HOTSPOT_TOPDIR)/test/native
$(eval $(call IncludeCustomExtension, hotspot, lib/CompileGtest.gmk))
GTEST_TEST_SRC += $(HOTSPOT_TOPDIR)/test/native
GTEST_LAUNCHER_SRC := $(HOTSPOT_TOPDIR)/test/native/gtestLauncher.cpp
GTEST_FRAMEWORK_SRC := $(SRC_ROOT)/test/fmw/gtest
# On Windows, there are no internal debug symbols so must set copying to true
@ -65,7 +68,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
$(BUILD_LIBJVM_ALL_OBJS)), \
CFLAGS := $(JVM_CFLAGS) -I$(GTEST_FRAMEWORK_SRC) \
-I$(GTEST_FRAMEWORK_SRC)/include \
-I$(GTEST_TEST_SRC), \
$(addprefix -I,$(GTEST_TEST_SRC)), \
CFLAGS_windows := /EHsc, \
CFLAGS_solaris := -DGTEST_HAS_EXCEPTIONS=0 -library=stlport4, \
CFLAGS_macosx := -DGTEST_OS_MAC=1, \
@ -73,7 +76,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
DISABLED_WARNINGS_gcc := undef, \
DISABLED_WARNINGS_clang := undef switch format-nonliteral \
tautological-undefined-compare, \
tautological-undefined-compare $(BUILD_LIBJVM_DISABLED_WARNINGS_clang), \
DISABLED_WARNINGS_solstudio := identexpected, \
LDFLAGS := $(JVM_LDFLAGS), \
LDFLAGS_solaris := -library=stlport4 $(call SET_SHARED_LIBRARY_ORIGIN), \
@ -83,6 +86,8 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
USE_MAPFILE_FOR_SYMBOLS := true, \
COPY_DEBUG_SYMBOLS := $(GTEST_COPY_DEBUG_SYMBOLS), \
ZIP_EXTERNAL_DEBUG_SYMBOLS := false, \
PRECOMPILED_HEADER := $(JVM_PRECOMPILED_HEADER), \
PRECOMPILED_HEADER_EXCLUDE := gtest-all.cc gtestMain.cpp, \
))
TARGETS += $(BUILD_GTEST_LIBJVM)
@ -93,7 +98,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LAUNCHER, \
TOOLCHAIN := $(JVM_TOOLCHAIN), \
PROGRAM := gtestLauncher, \
OUTPUT_DIR := $(JVM_OUTPUTDIR)/gtest, \
EXTRA_FILES := $(GTEST_TEST_SRC)/gtestLauncher.cpp, \
EXTRA_FILES := $(GTEST_LAUNCHER_SRC), \
OBJECT_DIR := $(JVM_OUTPUTDIR)/gtest/launcher-objs, \
CFLAGS := $(JVM_CFLAGS) -I$(GTEST_FRAMEWORK_SRC) \
-I$(GTEST_FRAMEWORK_SRC)/include, \

View File

@ -35,6 +35,7 @@ endif
ifeq ($(OPENJDK_TARGET_OS), linux)
BUILD_LIBJVM_ostream.cpp_CXXFLAGS := -D_FILE_OFFSET_BITS=64
BUILD_LIBJVM_logFileOutput.cpp_CXXFLAGS := -D_FILE_OFFSET_BITS=64
ifeq ($(OPENJDK_TARGET_CPU_ARCH), x86)
BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := -DNO_PCH $(CXX_O_FLAG_NONE)

View File

@ -6629,6 +6629,82 @@ operand cmpOpU()
%}
%}
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpEqNe()
%{
match(Bool);
match(CmpOp);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::ne
|| n->as_Bool()->_test._test == BoolTest::eq);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpLtGe()
%{
match(Bool);
match(CmpOp);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::lt
|| n->as_Bool()->_test._test == BoolTest::ge);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpUEqNeLtGe()
%{
match(Bool);
match(CmpOp);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::eq
|| n->as_Bool()->_test._test == BoolTest::ne
|| n->as_Bool()->_test._test == BoolTest::lt
|| n->as_Bool()->_test._test == BoolTest::ge);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
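For orientation, here is a minimal standalone C++ sketch (not part of this change; the function names are made up) of the comparison shapes the new cmpOpEqNe/cmpOpLtGe/cmpOpUEqNeLtGe operands target: an AArch64 compiler can turn a compare-against-zero into cbz/cbnz and a single-bit test into tbz/tbnz, avoiding a separate cmp and leaving the flags register untouched.

    #include <cstdint>

    // Candidate for cbz/cbnz: equality test of a register against zero.
    bool is_zero(int64_t x) {
      return x == 0;
    }

    // Candidate for tbz/tbnz: test of a single (power-of-two) bit, as matched by
    // the cmpI_branch_bit/cmpL_branch_bit rules further below.
    bool bit3_set(int64_t flags) {
      return (flags & (int64_t(1) << 3)) != 0;
    }

    // Candidate for the signed lt/ge-against-zero case (cmpOpLtGe): only the sign
    // bit decides, which tbnz/tbz on bit 63 (or 31) can test directly.
    bool is_negative(int64_t x) {
      return x < 0;
    }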
// Special operand allowing long args to int ops to be truncated for free
operand iRegL2I(iRegL reg) %{
@ -14286,10 +14362,8 @@ instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.
instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14305,10 +14379,8 @@ instruct cmpI_imm0_branch(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFla
ins_pipe(pipe_cmp_branch);
%}
instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14324,10 +14396,8 @@ instruct cmpL_imm0_branch(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg
ins_pipe(pipe_cmp_branch);
%}
instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpP op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14343,10 +14413,8 @@ instruct cmpP_imm0_branch(cmpOp cmp, iRegP op1, immP0 op2, label labl, rFlagsReg
ins_pipe(pipe_cmp_branch);
%}
instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpN op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14362,10 +14430,8 @@ instruct cmpN_imm0_branch(cmpOp cmp, iRegN op1, immN0 op2, label labl, rFlagsReg
ins_pipe(pipe_cmp_branch);
%}
instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
match(If cmp (CmpP (DecodeN oop) zero));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14381,12 +14447,8 @@ instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl
ins_pipe(pipe_cmp_branch);
%}
instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{
match(If cmp (CmpU op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq
|| n->in(1)->as_Bool()->_test._test == BoolTest::gt
|| n->in(1)->as_Bool()->_test._test == BoolTest::le);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14402,12 +14464,8 @@ instruct cmpUI_imm0_branch(cmpOpU cmp, iRegIorL2I op1, immI0 op2, label labl, rF
ins_pipe(pipe_cmp_branch);
%}
instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{
match(If cmp (CmpU op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq
|| n->in(1)->as_Bool()->_test._test == BoolTest::gt
|| n->in(1)->as_Bool()->_test._test == BoolTest::le);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14426,10 +14484,8 @@ instruct cmpUL_imm0_branch(cmpOpU cmp, iRegL op1, immL0 op2, label labl, rFlagsR
// Test bit and Branch
// Patterns for short (< 32KiB) variants
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14444,10 +14500,8 @@ instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
ins_short_branch(1);
%}
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14462,11 +14516,9 @@ instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
ins_short_branch(1);
%}
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14481,11 +14533,9 @@ instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl)
ins_short_branch(1);
%}
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14501,10 +14551,8 @@ instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label l
%}
// And far variants
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14518,10 +14566,8 @@ instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14535,11 +14581,9 @@ instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl)
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
effect(USE labl);
ins_cost(BRANCH_COST);
@ -14553,11 +14597,9 @@ instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label la
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
effect(USE labl);
ins_cost(BRANCH_COST);

View File

@ -630,7 +630,11 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
__ b(do_continue);
}
// See if we've got enough room on the stack for locals plus overhead.
// See if we've got enough room on the stack for locals plus overhead
// below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
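As a rough illustration, the check the generator emits boils down to the following standalone C++ sketch (ThreadStub and frame_fits are made-up names, not HotSpot types): the frame is rejected when locals plus overhead would push sp at or below JavaThread::stack_overflow_limit(), and the StackOverflowError path is taken directly instead of relying on the guard pages.

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the per-thread limit the generated code loads via
    // JavaThread::stack_overflow_limit_offset().
    struct ThreadStub {
      uintptr_t stack_overflow_limit;  // lowest sp still outside the guard zones
    };

    // True when a frame of frame_size bytes (locals + fixed overhead) still fits.
    // The generated AArch64 code computes limit + frame_size and branches to
    // after_frame_check only if sp is strictly higher.
    bool frame_fits(const ThreadStub& t, uintptr_t sp, size_t frame_size) {
      return sp > t.stack_overflow_limit + frame_size;
    }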
@ -674,40 +678,25 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// compute rsp as if this were going to be the last frame on
// the stack before the red zone
const Address stack_base(rthread, Thread::stack_base_offset());
const Address stack_size(rthread, Thread::stack_size_offset());
// locals + overhead, in bytes
__ mov(r0, overhead_size);
__ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize); // 2 slots per parameter.
__ ldr(rscratch1, stack_base);
__ ldr(rscratch2, stack_size);
const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
__ ldr(rscratch1, stack_limit);
#ifdef ASSERT
Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero
__ cbnz(rscratch1, stack_base_okay);
__ stop("stack base is zero");
__ bind(stack_base_okay);
// verify that thread stack size is non-zero
__ cbnz(rscratch2, stack_size_okay);
__ stop("stack size is zero");
__ bind(stack_size_okay);
Label limit_okay;
// Verify that thread stack limit is non-zero.
__ cbnz(rscratch1, limit_okay);
__ stop("stack overflow limit is zero");
__ bind(limit_okay);
#endif
// Add stack base to locals and subtract stack size
__ sub(rscratch1, rscratch1, rscratch2); // Stack limit
// Add stack limit to locals.
__ add(r0, r0, rscratch1);
// Use the bigger size for banging.
const int max_bang_size = MAX2(JavaThread::stack_shadow_zone_size(),
JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size());
// add in the red and yellow zone sizes
__ add(r0, r0, max_bang_size * 2);
// check against the current stack bottom
// Check against the current stack bottom.
__ cmp(sp, r0);
__ br(Assembler::HI, after_frame_check);
@ -1088,9 +1077,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r2, constMethod);
__ load_unsigned_short(r2, size_of_parameters);
// native calls don't need the stack size check since they have no
// Native calls don't need the stack size check since they have no
// expression stack and the arguments are already on the stack and
// we only add a handful of words to the stack
// we only add a handful of words to the stack.
// rmethod: Method*
// r2: size of parameters

View File

@ -706,9 +706,13 @@ class Assembler : public AbstractAssembler {
TW_OPCODE = (31u << OPCODE_SHIFT | 4u << 1),
// Atomics.
LBARX_OPCODE = (31u << OPCODE_SHIFT | 52u << 1),
LHARX_OPCODE = (31u << OPCODE_SHIFT | 116u << 1),
LWARX_OPCODE = (31u << OPCODE_SHIFT | 20u << 1),
LDARX_OPCODE = (31u << OPCODE_SHIFT | 84u << 1),
LQARX_OPCODE = (31u << OPCODE_SHIFT | 276u << 1),
STBCX_OPCODE = (31u << OPCODE_SHIFT | 694u << 1),
STHCX_OPCODE = (31u << OPCODE_SHIFT | 726u << 1),
STWCX_OPCODE = (31u << OPCODE_SHIFT | 150u << 1),
STDCX_OPCODE = (31u << OPCODE_SHIFT | 214u << 1),
STQCX_OPCODE = (31u << OPCODE_SHIFT | 182u << 1)
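For readers unfamiliar with these constants, a hedged standalone C++ sketch of how such X-form opcode values are assembled (the field layout below is the standard Power ISA X-form as I understand it, with OPCODE_SHIFT assumed to be 26; xform() is an illustrative helper, not HotSpot's API):

    #include <cstdint>
    #include <cassert>

    // X-form, LSB-0 bit numbering: primary opcode in bits 26..31, RT in 21..25,
    // RA in 16..20, RB in 11..15, extended opcode (XO) in bits 1..10, Rc/EH in bit 0.
    constexpr uint32_t xform(uint32_t primary, uint32_t xo,
                             uint32_t rt, uint32_t ra, uint32_t rb, uint32_t bit0) {
      return (primary << 26) | (rt << 21) | (ra << 16) | (rb << 11) | (xo << 1) | bit0;
    }

    int main() {
      // lharx r3, r4, r5 with EH=0: primary opcode 31, XO 116 (cf. LHARX_OPCODE above).
      uint32_t lharx_r3_r4_r5 = xform(31, 116, 3, 4, 5, 0);
      // sthcx. r3, r4, r5: primary opcode 31, XO 726, Rc=1 (cf. STHCX_OPCODE above).
      uint32_t sthcx_r3_r4_r5 = xform(31, 726, 3, 4, 5, 1);
      assert((lharx_r3_r4_r5 >> 26) == 31 && ((lharx_r3_r4_r5 >> 1) & 0x3ff) == 116);
      assert((sthcx_r3_r4_r5 & 1) == 1);
      return 0;
    }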
@ -1796,13 +1800,19 @@ class Assembler : public AbstractAssembler {
inline void waitrsv(); // >=Power7
// atomics
inline void lbarx_unchecked(Register d, Register a, Register b, int eh1 = 0); // >=Power 8
inline void lharx_unchecked(Register d, Register a, Register b, int eh1 = 0); // >=Power 8
inline void lwarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
inline void ldarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
inline void lqarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
inline void lqarx_unchecked(Register d, Register a, Register b, int eh1 = 0); // >=Power 8
inline bool lxarx_hint_exclusive_access();
inline void lbarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void lharx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void lwarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void ldarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void lqarx( Register d, Register a, Register b, bool hint_exclusive_access = false);
inline void stbcx_( Register s, Register a, Register b);
inline void sthcx_( Register s, Register a, Register b);
inline void stwcx_( Register s, Register a, Register b);
inline void stdcx_( Register s, Register a, Register b);
inline void stqcx_( Register s, Register a, Register b);
@ -2169,12 +2179,18 @@ class Assembler : public AbstractAssembler {
inline void dcbtstct(Register s2, int ct);
// Atomics: use ra0mem to disallow R0 as base.
inline void lbarx_unchecked(Register d, Register b, int eh1);
inline void lharx_unchecked(Register d, Register b, int eh1);
inline void lwarx_unchecked(Register d, Register b, int eh1);
inline void ldarx_unchecked(Register d, Register b, int eh1);
inline void lqarx_unchecked(Register d, Register b, int eh1);
inline void lbarx( Register d, Register b, bool hint_exclusive_access);
inline void lharx( Register d, Register b, bool hint_exclusive_access);
inline void lwarx( Register d, Register b, bool hint_exclusive_access);
inline void ldarx( Register d, Register b, bool hint_exclusive_access);
inline void lqarx( Register d, Register b, bool hint_exclusive_access);
inline void stbcx_(Register s, Register b);
inline void sthcx_(Register s, Register b);
inline void stwcx_(Register s, Register b);
inline void stdcx_(Register s, Register b);
inline void stqcx_(Register s, Register b);

View File

@ -594,13 +594,19 @@ inline void Assembler::waitrsv() { emit_int32( WAIT_OPCODE | 1<<(31-10)); } // W
// atomics
// Use ra0mem to disallow R0 as base.
inline void Assembler::lbarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LBARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::lharx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LHARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::lqarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LQARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline bool Assembler::lxarx_hint_exclusive_access() { return VM_Version::has_lxarxeh(); }
inline void Assembler::lbarx( Register d, Register a, Register b, bool hint_exclusive_access) { lbarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lharx( Register d, Register a, Register b, bool hint_exclusive_access) { lharx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lqarx( Register d, Register a, Register b, bool hint_exclusive_access) { lqarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stbcx_(Register s, Register a, Register b) { emit_int32( STBCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::sthcx_(Register s, Register a, Register b) { emit_int32( STHCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stwcx_(Register s, Register a, Register b) { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register a, Register b) { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stqcx_(Register s, Register a, Register b) { emit_int32( STQCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
@ -933,12 +939,18 @@ inline void Assembler::dcbtst( Register s2) { emit_int32( DCBTST_OPCOD
inline void Assembler::dcbtstct(Register s2, int ct) { emit_int32( DCBTST_OPCODE | rb(s2) | thct(ct)); }
// ra0 version
inline void Assembler::lbarx_unchecked(Register d, Register b, int eh1) { emit_int32( LBARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lharx_unchecked(Register d, Register b, int eh1) { emit_int32( LHARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lqarx_unchecked(Register d, Register b, int eh1) { emit_int32( LQARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lbarx( Register d, Register b, bool hint_exclusive_access){ lbarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lharx( Register d, Register b, bool hint_exclusive_access){ lharx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::lqarx( Register d, Register b, bool hint_exclusive_access){ lqarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stbcx_(Register s, Register b) { emit_int32( STBCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::sthcx_(Register s, Register b) { emit_int32( STHCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::stwcx_(Register s, Register b) { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register b) { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::stqcx_(Register s, Register b) { emit_int32( STQCX_OPCODE | rs(s) | rb(b) | rc(1)); }

View File

@ -40,9 +40,9 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for impli
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
#define DEFAULT_STACK_YELLOW_PAGES (6)
#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#define DEFAULT_STACK_RESERVED_PAGES (1)
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -478,33 +478,6 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register
profile_typecheck_failed(Rtmp1, Rtmp2);
}
void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
Label done;
BLOCK_COMMENT("stack_overflow_check_with_compare_and_throw {");
sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
ld(Rscratch1, thread_(stack_overflow_limit));
cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
bgt(CCR0/*is_stack_overflow*/, done);
// Load target address of the runtime stub.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
mtctr(Rscratch1);
// Restore caller_sp.
#ifdef ASSERT
ld(Rscratch1, 0, R1_SP);
ld(R0, 0, R21_sender_SP);
cmpd(CCR0, R0, Rscratch1);
asm_assert_eq("backlink", 0x547);
#endif // ASSERT
mr(R1_SP, R21_sender_SP);
bctr();
align(32, 12);
bind(done);
BLOCK_COMMENT("} stack_overflow_check_with_compare_and_throw");
}
// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
void generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack

View File

@ -1422,42 +1422,168 @@ void MacroAssembler::reserved_stack_check(Register return_pc) {
bind(no_reserved_zone_enabling);
}
// CmpxchgX sets condition register to cmpX(current, compare).
void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
Register compare_value, Register exchange_value,
Register addr_base, int semantics, bool cmpxchgx_hint,
Register int_flag_success, bool contention_hint, bool weak) {
void MacroAssembler::getandsetd(Register dest_current_value, Register exchange_value, Register addr_base,
bool cmpxchgx_hint) {
Label retry;
Label failed;
Label done;
// Save one branch if result is returned via register and
// result register is different from the other ones.
bool use_result_reg = (int_flag_success != noreg);
bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
int_flag_success != exchange_value && int_flag_success != addr_base);
assert(!weak || flag == CCR0, "weak only supported with CCR0");
if (use_result_reg && preset_result_reg) {
li(int_flag_success, 0); // preset (assume cas failed)
bind(retry);
ldarx(dest_current_value, addr_base, cmpxchgx_hint);
stdcx_(exchange_value, addr_base);
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
} else {
bne( CCR0, retry); // StXcx_ sets CCR0.
}
}
// Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
if (contention_hint) { // Don't try to reserve if cmp fails.
lwz(dest_current_value, 0, addr_base);
cmpw(flag, dest_current_value, compare_value);
bne(flag, failed);
void MacroAssembler::getandaddd(Register dest_current_value, Register inc_value, Register addr_base,
Register tmp, bool cmpxchgx_hint) {
Label retry;
bind(retry);
ldarx(dest_current_value, addr_base, cmpxchgx_hint);
add(tmp, dest_current_value, inc_value);
stdcx_(tmp, addr_base);
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
} else {
bne( CCR0, retry); // StXcx_ sets CCR0.
}
}
// release/fence semantics
if (semantics & MemBarRel) {
release();
// Word/sub-word atomic helper functions
// Temps and addr_base are killed if size < 4 and processor does not support respective instructions.
// Only signed types are supported with size < 4.
// Atomic add always kills tmp1.
void MacroAssembler::atomic_get_and_modify_generic(Register dest_current_value, Register exchange_value,
Register addr_base, Register tmp1, Register tmp2, Register tmp3,
bool cmpxchgx_hint, bool is_add, int size) {
// Sub-word instructions are available since Power 8.
// For older processors, instruction_type != size holds, and we
// emulate the sub-word instructions by constructing a 4-byte value
// that leaves the other bytes unchanged.
const int instruction_type = VM_Version::has_lqarx() ? size : 4;
Label retry;
Register shift_amount = noreg,
val32 = dest_current_value,
modval = is_add ? tmp1 : exchange_value;
if (instruction_type != size) {
assert_different_registers(tmp1, tmp2, tmp3, dest_current_value, exchange_value, addr_base);
modval = tmp1;
shift_amount = tmp2;
val32 = tmp3;
// Need some preparation: Compute shift amount, align address. Note: shorts must be 2 byte aligned.
#ifdef VM_LITTLE_ENDIAN
rldic(shift_amount, addr_base, 3, 64-5); // (dest & 3) * 8;
clrrdi(addr_base, addr_base, 2);
#else
xori(shift_amount, addr_base, (size == 1) ? 3 : 2);
clrrdi(addr_base, addr_base, 2);
rldic(shift_amount, shift_amount, 3, 64-5); // byte: ((3-dest) & 3) * 8; short: ((1-dest/2) & 1) * 16;
#endif
}
// atomic emulation loop
bind(retry);
lwarx(dest_current_value, addr_base, cmpxchgx_hint);
switch (instruction_type) {
case 4: lwarx(val32, addr_base, cmpxchgx_hint); break;
case 2: lharx(val32, addr_base, cmpxchgx_hint); break;
case 1: lbarx(val32, addr_base, cmpxchgx_hint); break;
default: ShouldNotReachHere();
}
if (instruction_type != size) {
srw(dest_current_value, val32, shift_amount);
}
if (is_add) { add(modval, dest_current_value, exchange_value); }
if (instruction_type != size) {
// Transform exchange value such that the replacement can be done by one xor instruction.
xorr(modval, dest_current_value, is_add ? modval : exchange_value);
clrldi(modval, modval, (size == 1) ? 56 : 48);
slw(modval, modval, shift_amount);
xorr(modval, val32, modval);
}
switch (instruction_type) {
case 4: stwcx_(modval, addr_base); break;
case 2: sthcx_(modval, addr_base); break;
case 1: stbcx_(modval, addr_base); break;
default: ShouldNotReachHere();
}
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
} else {
bne( CCR0, retry); // StXcx_ sets CCR0.
}
// l?arx zero-extends, but Java wants byte/short values sign-extended.
if (size == 1) {
extsb(dest_current_value, dest_current_value);
} else if (size == 2) {
extsh(dest_current_value, dest_current_value);
};
}
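To make the word/sub-word emulation above concrete, here is a minimal standalone C++ sketch (emulated_fetch_add_byte is an illustrative name, and std::atomic's CAS stands in for the lwarx/stwcx_ loop): the byte-wide add is carried out on the enclosing aligned 32-bit word, touching only the addressed byte. The sketch uses a plain mask-and-or to splice the new byte in; the generated PPC code achieves the same splice with a single xor, as in the cmpxchg helper below.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Byte-wide atomic add emulated on the containing 32-bit word, assuming
    // little-endian byte numbering within the word (the big-endian case only
    // changes how 'shift' is derived, as in the #ifdef above).
    int8_t emulated_fetch_add_byte(std::atomic<uint32_t>& word, unsigned byte_index, int8_t inc) {
      const unsigned shift = byte_index * 8;
      uint32_t old_word = word.load();
      uint32_t new_word;
      int8_t old_byte;
      do {
        old_byte = int8_t((old_word >> shift) & 0xffu);
        const uint32_t new_byte = uint8_t(old_byte + inc);
        new_word = (old_word & ~(0xffu << shift)) | (new_byte << shift);
      } while (!word.compare_exchange_weak(old_word, new_word));
      return old_byte;  // like getandadd: the previous (sign-extended) value
    }

    int main() {
      std::atomic<uint32_t> word{0x04030201u};
      int8_t prev = emulated_fetch_add_byte(word, 1, 5);      // byte 1 holds 0x02
      std::printf("prev=%d word=%08x\n", prev, word.load());  // prev=2 word=04030701
    }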
// Temps, addr_base and exchange_value are killed if size < 4 and processor does not support respective instructions.
// Only signed types are supported with size < 4.
void MacroAssembler::cmpxchg_loop_body(ConditionRegister flag, Register dest_current_value,
Register compare_value, Register exchange_value,
Register addr_base, Register tmp1, Register tmp2,
Label &retry, Label &failed, bool cmpxchgx_hint, int size) {
// Sub-word instructions are available since Power 8.
// For older processors, instruction_type != size holds, and we
// emulate the sub-word instructions by constructing a 4-byte value
// that leaves the other bytes unchanged.
const int instruction_type = VM_Version::has_lqarx() ? size : 4;
Register shift_amount = noreg,
val32 = dest_current_value,
modval = exchange_value;
if (instruction_type != size) {
assert_different_registers(tmp1, tmp2, dest_current_value, compare_value, exchange_value, addr_base);
shift_amount = tmp1;
val32 = tmp2;
modval = tmp2;
// Need some preparation: Compute shift amount, align address. Note: shorts must be 2 byte aligned.
#ifdef VM_LITTLE_ENDIAN
rldic(shift_amount, addr_base, 3, 64-5); // (dest & 3) * 8;
clrrdi(addr_base, addr_base, 2);
#else
xori(shift_amount, addr_base, (size == 1) ? 3 : 2);
clrrdi(addr_base, addr_base, 2);
rldic(shift_amount, shift_amount, 3, 64-5); // byte: ((3-dest) & 3) * 8; short: ((1-dest/2) & 1) * 16;
#endif
// Transform exchange value such that the replacement can be done by one xor instruction.
xorr(exchange_value, compare_value, exchange_value);
clrldi(exchange_value, exchange_value, (size == 1) ? 56 : 48);
slw(exchange_value, exchange_value, shift_amount);
}
// atomic emulation loop
bind(retry);
switch (instruction_type) {
case 4: lwarx(val32, addr_base, cmpxchgx_hint); break;
case 2: lharx(val32, addr_base, cmpxchgx_hint); break;
case 1: lbarx(val32, addr_base, cmpxchgx_hint); break;
default: ShouldNotReachHere();
}
if (instruction_type != size) {
srw(dest_current_value, val32, shift_amount);
}
if (size == 1) {
extsb(dest_current_value, dest_current_value);
} else if (size == 2) {
extsh(dest_current_value, dest_current_value);
};
cmpw(flag, dest_current_value, compare_value);
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(flag, failed);
@ -1467,7 +1593,60 @@ void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_valu
// branch to done => (flag == ne), (dest_current_value != compare_value)
// fall through => (flag == eq), (dest_current_value == compare_value)
stwcx_(exchange_value, addr_base);
if (instruction_type != size) {
xorr(modval, val32, exchange_value);
}
switch (instruction_type) {
case 4: stwcx_(modval, addr_base); break;
case 2: sthcx_(modval, addr_base); break;
case 1: stbcx_(modval, addr_base); break;
default: ShouldNotReachHere();
}
}
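The "one xor instruction" splice relied on above deserves a spelled-out example. A small standalone C++ sketch (splice_subword is a made-up name) verifying the identity: if old_sub is the sub-word currently stored at bit position shift inside word, then xor-ing word with ((old_sub ^ new_sub) & mask) << shift replaces exactly that sub-word with new_sub, because x ^ (x ^ y) == y and every other bit position is xor-ed with zero.

    #include <cstdint>
    #include <cstdio>
    #include <cassert>

    // Replace the masked sub-word at 'shift' with new_sub using a single xor,
    // assuming old_sub is the value currently stored there.
    uint32_t splice_subword(uint32_t word, uint32_t old_sub, uint32_t new_sub,
                            unsigned shift, uint32_t mask) {
      const uint32_t delta = ((old_sub ^ new_sub) & mask) << shift;
      return word ^ delta;
    }

    int main() {
      const uint32_t word = 0x11223344;
      // Replace the halfword at bits 16..31 (currently 0x1122) with 0xBEEF.
      const uint32_t out = splice_subword(word, 0x1122, 0xBEEF, 16, 0xFFFF);
      assert(out == 0xBEEF3344);
      std::printf("%08x\n", out);
    }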
// CmpxchgX sets condition register to cmpX(current, compare).
void MacroAssembler::cmpxchg_generic(ConditionRegister flag, Register dest_current_value,
Register compare_value, Register exchange_value,
Register addr_base, Register tmp1, Register tmp2,
int semantics, bool cmpxchgx_hint,
Register int_flag_success, bool contention_hint, bool weak, int size) {
Label retry;
Label failed;
Label done;
// Save one branch if result is returned via register and
// result register is different from the other ones.
bool use_result_reg = (int_flag_success != noreg);
bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
int_flag_success != exchange_value && int_flag_success != addr_base &&
int_flag_success != tmp1 && int_flag_success != tmp2);
assert(!weak || flag == CCR0, "weak only supported with CCR0");
assert(size == 1 || size == 2 || size == 4, "unsupported");
if (use_result_reg && preset_result_reg) {
li(int_flag_success, 0); // preset (assume cas failed)
}
// Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
if (contention_hint) { // Don't try to reserve if cmp fails.
switch (size) {
case 1: lbz(dest_current_value, 0, addr_base); extsb(dest_current_value, dest_current_value); break;
case 2: lha(dest_current_value, 0, addr_base); break;
case 4: lwz(dest_current_value, 0, addr_base); break;
default: ShouldNotReachHere();
}
cmpw(flag, dest_current_value, compare_value);
bne(flag, failed);
}
// release/fence semantics
if (semantics & MemBarRel) {
release();
}
cmpxchg_loop_body(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
retry, failed, cmpxchgx_hint, size);
if (!weak || use_result_reg) {
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
bne_predict_not_taken(CCR0, weak ? failed : retry); // StXcx_ sets CCR0.
@ -3751,454 +3930,6 @@ void MacroAssembler::has_negatives(Register src, Register cnt, Register result,
bind(Ldone);
}
// Intrinsics for non-CompactStrings
// Search for a single jchar in an jchar[].
//
// Assumes that result differs from all other registers.
//
// 'haystack' is the addresses of a jchar-array.
// 'needle' is either the character to search for or R0.
// 'needleChar' is the character to search for if 'needle' == R0..
// 'haycnt' is the length of the haystack. We assume 'haycnt' >=1.
//
// Preserves haystack, haycnt, needle and kills all other registers.
//
// If needle == R0, we search for the constant needleChar.
void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
Register needle, jchar needleChar,
Register tmp1, Register tmp2) {
assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
Register addr = tmp1,
ch1 = tmp2,
ch2 = R0;
//3:
dcbtct(haystack, 0x00); // Indicate R/O access to haystack.
srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
mr(addr, haystack);
beq(CCR0, L_FinalCheck);
mtctr(tmp2); // Move to count register.
//8:
bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
lhz(ch1, 0, addr); // Load characters from haystack.
lhz(ch2, 2, addr);
(needle != R0) ? cmpw(CCR0, ch1, needle) : cmplwi(CCR0, ch1, needleChar);
(needle != R0) ? cmpw(CCR1, ch2, needle) : cmplwi(CCR1, ch2, needleChar);
beq(CCR0, L_Found1); // Did we find the needle?
beq(CCR1, L_Found2);
addi(addr, addr, 4);
bdnz(L_InnerLoop);
//16:
bind(L_FinalCheck);
andi_(R0, haycnt, 1);
beq(CCR0, L_NotFound);
lhz(ch1, 0, addr); // One position left at which we have to compare.
(needle != R0) ? cmpw(CCR1, ch1, needle) : cmplwi(CCR1, ch1, needleChar);
beq(CCR1, L_Found3);
//21:
bind(L_NotFound);
li(result, -1); // Not found.
b(L_End);
bind(L_Found2);
addi(addr, addr, 2);
//24:
bind(L_Found1);
bind(L_Found3); // Return index ...
subf(addr, haystack, addr); // relative to haystack,
srdi(result, addr, 1); // in characters.
bind(L_End);
}
// Implementation of IndexOf for jchar arrays.
//
// The length of haystack and needle are not constant, i.e. passed in a register.
//
// Preserves registers haystack, needle.
// Kills registers haycnt, needlecnt.
// Assumes that result differs from all other registers.
// Haystack, needle are the addresses of jchar-arrays.
// Haycnt, needlecnt are the lengths of them, respectively.
//
// Needlecntval must be zero or 15-bit unsigned immediate and > 1.
void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
// Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
Label L_TooShort, L_Found, L_NotFound, L_End;
Register last_addr = haycnt, // Kill haycnt at the beginning.
addr = tmp1,
n_start = tmp2,
ch1 = tmp3,
ch2 = R0;
// **************************************************************************************************
// Prepare for main loop: optimized for needle count >=2, bail out otherwise.
// **************************************************************************************************
//1 (variable) or 3 (const):
dcbtct(needle, 0x00); // Indicate R/O access to str1.
dcbtct(haystack, 0x00); // Indicate R/O access to str2.
// Compute last haystack addr to use if no match gets found.
if (needlecntval == 0) { // variable needlecnt
//3:
subf(ch1, needlecnt, haycnt); // Last character index to compare is haycnt-needlecnt.
addi(addr, haystack, -2); // Accesses use pre-increment.
cmpwi(CCR6, needlecnt, 2);
blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
slwi(ch1, ch1, 1); // Scale to number of bytes.
lwz(n_start, 0, needle); // Load first 2 characters of needle.
add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
addi(needlecnt, needlecnt, -2); // Rest of needle.
} else { // constant needlecnt
guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
//5:
addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
lwz(n_start, 0, needle); // Load first 2 characters of needle.
addi(addr, haystack, -2); // Accesses use pre-increment.
slwi(ch1, ch1, 1); // Scale to number of bytes.
add(last_addr, haystack, ch1); // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
li(needlecnt, needlecntval-2); // Rest of needle.
}
// Main Loop (now we have at least 3 characters).
//11:
Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
bind(L_OuterLoop); // Search for 1st 2 characters.
Register addr_diff = tmp4;
subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
addi(addr, addr, 2); // This is the new address we want to use for comparing.
srdi_(ch2, addr_diff, 2);
beq(CCR0, L_FinalCheck); // 2 characters left?
mtctr(ch2); // addr_diff/4
//16:
bind(L_InnerLoop); // Main work horse (2x unrolled search loop)
lwz(ch1, 0, addr); // Load 2 characters of haystack (ignore alignment).
lwz(ch2, 2, addr);
cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
cmpw(CCR1, ch2, n_start);
beq(CCR0, L_Comp1); // Did we find the needle start?
beq(CCR1, L_Comp2);
addi(addr, addr, 4);
bdnz(L_InnerLoop);
//24:
bind(L_FinalCheck);
rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
beq(CCR0, L_NotFound);
lwz(ch1, 0, addr); // One position left at which we have to compare.
cmpw(CCR1, ch1, n_start);
beq(CCR1, L_Comp3);
//29:
bind(L_NotFound);
li(result, -1); // not found
b(L_End);
// **************************************************************************************************
// Special Case: unfortunately, the variable needle case can be called with needlecnt<2
// **************************************************************************************************
//31:
if ((needlecntval>>1) !=1 ) { // Const needlecnt is 2 or 3? Reduce code size.
int nopcnt = 5;
if (needlecntval !=0 ) ++nopcnt; // Balance alignment (other case: see below).
if (needlecntval == 0) { // We have to handle these cases separately.
Label L_OneCharLoop;
bind(L_TooShort);
mtctr(haycnt);
lhz(n_start, 0, needle); // First character of needle
bind(L_OneCharLoop);
lhzu(ch1, 2, addr);
cmpw(CCR1, ch1, n_start);
beq(CCR1, L_Found); // Did we find the one character needle?
bdnz(L_OneCharLoop);
li(result, -1); // Not found.
b(L_End);
} // 8 instructions, so no impact on alignment.
for (int x = 0; x < nopcnt; ++x) nop();
}
// **************************************************************************************************
// Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
// **************************************************************************************************
// Compare the rest
//36 if needlecntval==0, else 37:
bind(L_Comp2);
addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
bind(L_Comp1); // Addr points to possible needle start.
bind(L_Comp3); // Could have created a copy and use a different return address but saving code size here.
if (needlecntval != 2) { // Const needlecnt==2?
if (needlecntval != 3) {
if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
Register ind_reg = tmp4;
li(ind_reg, 2*2); // First 2 characters are already compared, use index 2.
mtctr(needlecnt); // Decremented by 2, still > 0.
//40:
Label L_CompLoop;
bind(L_CompLoop);
lhzx(ch2, needle, ind_reg);
lhzx(ch1, addr, ind_reg);
cmpw(CCR1, ch1, ch2);
bne(CCR1, L_OuterLoop);
addi(ind_reg, ind_reg, 2);
bdnz(L_CompLoop);
} else { // No loop required if there's only one needle character left.
lhz(ch2, 2*2, needle);
lhz(ch1, 2*2, addr);
cmpw(CCR1, ch1, ch2);
bne(CCR1, L_OuterLoop);
}
}
// Return index ...
//46:
bind(L_Found);
subf(addr, haystack, addr); // relative to haystack, ...
srdi(result, addr, 1); // in characters.
//48:
bind(L_End);
}
// Implementation of Compare for jchar arrays.
//
// Kills the registers str1, str2, cnt1, cnt2.
// Kills cr0, ctr.
// Assumes that result differes from the input registers.
void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
Register result_reg, Register tmp_reg) {
assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
Register cnt_diff = R0,
limit_reg = cnt1_reg,
chr1_reg = result_reg,
chr2_reg = cnt2_reg,
addr_diff = str2_reg;
// 'cnt_reg' contains the number of characters in the string's character array for the
// pre-CompactStrings strings implementation and the number of bytes in the string's
// byte array for the CompactStrings strings implementation.
const int HAS_COMPACT_STRING = java_lang_String::has_coder_field() ? 1 : 0; // '1' = byte array, '0' = char array
// Offset 0 should be 32 byte aligned.
//-6:
srawi(cnt1_reg, cnt1_reg, HAS_COMPACT_STRING);
srawi(cnt2_reg, cnt2_reg, HAS_COMPACT_STRING);
//-4:
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
//-2:
// Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
subf(result_reg, cnt2_reg, cnt1_reg); // difference between cnt1/2
subf_(addr_diff, str1_reg, str2_reg); // alias?
beq(CCR0, Ldone); // return cnt difference if both ones are identical
srawi(limit_reg, result_reg, 31); // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
mr(cnt_diff, result_reg);
andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
add_(limit_reg, cnt2_reg, limit_reg); // min(cnt1, cnt2)==0?
beq(CCR0, Ldone); // return cnt difference if one has 0 length
lhz(chr1_reg, 0, str1_reg); // optional: early out if first characters mismatch
lhzx(chr2_reg, str1_reg, addr_diff); // optional: early out if first characters mismatch
addi(tmp_reg, limit_reg, -1); // min(cnt1, cnt2)-1
subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
bne(CCR0, Ldone); // optional: early out if first characters mismatch
// Set loop counter by scaling down tmp_reg
srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
ble(CCR0, Lslow_case); // need >4 characters for fast loop
andi(limit_reg, tmp_reg, 4-1); // remaining characters
// Adapt str1_reg str2_reg for the first loop iteration
mtctr(chr2_reg); // (min(cnt1, cnt2)-1)/4
addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
//16:
// Compare the rest of the characters
bind(Lfast_loop);
ld(chr1_reg, 0, str1_reg);
ldx(chr2_reg, str1_reg, addr_diff);
cmpd(CCR0, chr2_reg, chr1_reg);
bne(CCR0, Lslow_case); // return chr1_reg
addi(str1_reg, str1_reg, 4*2);
bdnz(Lfast_loop);
addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
//23:
bind(Lslow_case);
mtctr(limit_reg);
//24:
bind(Lslow_loop);
lhz(chr1_reg, 0, str1_reg);
lhzx(chr2_reg, str1_reg, addr_diff);
subf_(result_reg, chr2_reg, chr1_reg);
bne(CCR0, Ldone); // return chr1_reg
addi(str1_reg, str1_reg, 1*2);
bdnz(Lslow_loop);
//30:
// If strings are equal up to min length, return the length difference.
mr(result_reg, cnt_diff);
nop(); // alignment
//32:
// Otherwise, return the difference between the first mismatched chars.
bind(Ldone);
}
// Compare char[] arrays.
//
// str1_reg USE only
// str2_reg USE only
// cnt_reg USE_DEF, due to tmp reg shortage
// result_reg DEF only, might compromise USE only registers
void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
Register tmp5_reg) {
// Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
// Offset 0 should be 32 byte aligned.
Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
Register index_reg = tmp5_reg;
Register cbc_iter = tmp4_reg;
// 'cnt_reg' contains the number of characters in the string's character array for the
// pre-CompactStrings strings implementation and the number of bytes in the string's
// byte array for the CompactStrings strings implementation.
const int HAS_COMPACT_STRING = java_lang_String::has_coder_field() ? 1 : 0; // '1' = byte array, '0' = char array
//-1:
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
//1:
// cbc_iter: remaining characters after the '4 java characters per iteration' loop.
rlwinm(cbc_iter, cnt_reg, 32 - HAS_COMPACT_STRING, 30, 31); // (cnt_reg % (HAS_COMPACT_STRING ? 8 : 4)) >> HAS_COMPACT_STRING
li(index_reg, 0); // init
li(result_reg, 0); // assume false
// tmp2_reg: units of 4 java characters (i.e. 8 bytes) per iteration (main loop).
srwi_(tmp2_reg, cnt_reg, exact_log2(4 << HAS_COMPACT_STRING)); // cnt_reg / (HAS_COMPACT_STRING ? 8 : 4)
cmpwi(CCR1, cbc_iter, 0); // CCR1 = (cbc_iter==0)
beq(CCR0, Linit_cbc); // too short
mtctr(tmp2_reg);
//8:
bind(Lloop);
ldx(tmp1_reg, str1_reg, index_reg);
ldx(tmp2_reg, str2_reg, index_reg);
cmpd(CCR0, tmp1_reg, tmp2_reg);
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
addi(index_reg, index_reg, 4*sizeof(jchar));
bdnz(Lloop);
//14:
bind(Linit_cbc);
beq(CCR1, Ldone_true);
mtctr(cbc_iter);
//16:
bind(Lcbc);
lhzx(tmp1_reg, str1_reg, index_reg);
lhzx(tmp2_reg, str2_reg, index_reg);
cmpw(CCR0, tmp1_reg, tmp2_reg);
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
addi(index_reg, index_reg, 1*sizeof(jchar));
bdnz(Lcbc);
nop();
bind(Ldone_true);
li(result_reg, 1);
//24:
bind(Ldone_false);
}
void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
Register tmp1_reg, Register tmp2_reg) {
// Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
assert(sizeof(jchar) == 2, "must be");
assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
// 'cntval' contains the number of characters in the string's character array for the
// pre-CompactStrings strings implementation and the number of bytes in the string's
// byte array for the CompactStrings strings implementation.
cntval >>= (java_lang_String::has_coder_field() ? 1 : 0); // '1' = byte array strings, '0' = char array strings
Label Ldone_false;
if (cntval < 16) { // short case
if (cntval != 0) li(result_reg, 0); // assume false
const int num_bytes = cntval*sizeof(jchar);
int index = 0;
for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
ld(tmp1_reg, index, str1_reg);
ld(tmp2_reg, index, str2_reg);
cmpd(CCR0, tmp1_reg, tmp2_reg);
bne(CCR0, Ldone_false);
}
if (cntval & 2) {
lwz(tmp1_reg, index, str1_reg);
lwz(tmp2_reg, index, str2_reg);
cmpw(CCR0, tmp1_reg, tmp2_reg);
bne(CCR0, Ldone_false);
index += 4;
}
if (cntval & 1) {
lhz(tmp1_reg, index, str1_reg);
lhz(tmp2_reg, index, str2_reg);
cmpw(CCR0, tmp1_reg, tmp2_reg);
bne(CCR0, Ldone_false);
}
// fallthrough: true
} else {
Label Lloop;
Register index_reg = tmp1_reg;
const int loopcnt = cntval/4;
assert(loopcnt > 0, "must be");
// Offset 0 should be 32 byte aligned.
//2:
dcbtct(str1_reg, 0x00); // Indicate R/O access to str1.
dcbtct(str2_reg, 0x00); // Indicate R/O access to str2.
li(tmp2_reg, loopcnt);
li(index_reg, 0); // init
li(result_reg, 0); // assume false
mtctr(tmp2_reg);
//8:
bind(Lloop);
ldx(R0, str1_reg, index_reg);
ldx(tmp2_reg, str2_reg, index_reg);
cmpd(CCR0, R0, tmp2_reg);
bne(CCR0, Ldone_false); // Unequal char pair found -> done.
addi(index_reg, index_reg, 4*sizeof(jchar));
bdnz(Lloop);
//14:
if (cntval & 2) {
lwzx(R0, str1_reg, index_reg);
lwzx(tmp2_reg, str2_reg, index_reg);
cmpw(CCR0, R0, tmp2_reg);
bne(CCR0, Ldone_false);
if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
}
if (cntval & 1) {
lhzx(R0, str1_reg, index_reg);
lhzx(tmp2_reg, str2_reg, index_reg);
cmpw(CCR0, R0, tmp2_reg);
bne(CCR0, Ldone_false);
}
// fallthru: true
}
li(result_reg, 1);
bind(Ldone_false);
}
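Both equality emitters above (register count and immediate count) share one contract; a hedged Java-level model with invented names:
class CharArraysEqualsModel {
    // Illustrative only: result_reg becomes 1 when the first cnt chars match, else 0.
    static int equalsUpTo(char[] a, char[] b, int cnt) {
        for (int i = 0; i < cnt; i++) {
            if (a[i] != b[i]) {
                return 0;   // Ldone_false path
            }
        }
        return 1;           // Ldone_true / fallthrough path
    }
}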
#endif // Compiler2
// Helpers for Intrinsic Emitters

View File

@ -431,10 +431,81 @@ class MacroAssembler: public Assembler {
MemBarAcq = 2,
MemBarFenceAfter = 4 // use powers of 2
};
private:
// Helper functions for word/sub-word atomics.
void atomic_get_and_modify_generic(Register dest_current_value, Register exchange_value,
Register addr_base, Register tmp1, Register tmp2, Register tmp3,
bool cmpxchgx_hint, bool is_add, int size);
void cmpxchg_loop_body(ConditionRegister flag, Register dest_current_value,
Register compare_value, Register exchange_value,
Register addr_base, Register tmp1, Register tmp2,
Label &retry, Label &failed, bool cmpxchgx_hint, int size);
void cmpxchg_generic(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
Register tmp1, Register tmp2,
int semantics, bool cmpxchgx_hint, Register int_flag_success, bool contention_hint, bool weak, int size);
public:
// Temps and addr_base are killed if processor does not support Power 8 instructions.
// Result will be sign extended.
void getandsetb(Register dest_current_value, Register exchange_value, Register addr_base,
Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, false, 1);
}
// Temps and addr_base are killed if processor does not support Power 8 instructions.
// Result will be sign extended.
void getandseth(Register dest_current_value, Register exchange_value, Register addr_base,
Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, false, 2);
}
void getandsetw(Register dest_current_value, Register exchange_value, Register addr_base,
bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, noreg, noreg, noreg, cmpxchgx_hint, false, 4);
}
void getandsetd(Register dest_current_value, Register exchange_value, Register addr_base,
bool cmpxchgx_hint);
// tmp2/3 and addr_base are killed if processor does not support Power 8 instructions (tmp1 is always needed).
// Result will be sign extended.
void getandaddb(Register dest_current_value, Register inc_value, Register addr_base,
Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, true, 1);
}
// tmp2/3 and addr_base are killed if processor does not support Power 8 instructions (tmp1 is always needed).
// Result will be sign extended.
void getandaddh(Register dest_current_value, Register inc_value, Register addr_base,
Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, true, 2);
}
void getandaddw(Register dest_current_value, Register inc_value, Register addr_base,
Register tmp1, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, noreg, noreg, cmpxchgx_hint, true, 4);
}
void getandaddd(Register dest_current_value, Register exchange_value, Register addr_base,
Register tmp, bool cmpxchgx_hint);
// Temps, addr_base and exchange_value are killed if processor does not support Power 8 instructions.
// compare_value must be at least 32 bit sign extended. Result will be sign extended.
void cmpxchgb(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
Register tmp1, Register tmp2, int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 1);
}
// Temps, addr_base and exchange_value are killed if processor does not support Power 8 instructions.
// compare_value must be at least 32 bit sign extended. Result will be sign extended.
void cmpxchgh(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
Register tmp1, Register tmp2, int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 2);
}
void cmpxchgw(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, bool contention_hint = false, bool weak = false);
Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, noreg, noreg,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 4);
}
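These sub-word helpers presumably back the byte/short atomics reachable from Unsafe and VarHandles; a hedged Java-level illustration of the kind of caller they serve (the class, field, and the mapping noted in the comments are assumptions, not something this header states):
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

class ByteFlag {
    volatile byte state;
    static final VarHandle STATE;
    static {
        try {
            STATE = MethodHandles.lookup().findVarHandle(ByteFlag.class, "state", byte.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }
    boolean claim() {                                         // 1-byte CAS, cf. cmpxchgb above
        return STATE.compareAndSet(this, (byte) 0, (byte) 1);
    }
    byte swap(byte v) {                                       // 1-byte exchange, cf. getandsetb above
        return (byte) STATE.getAndSet(this, v);
    }
}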
void cmpxchgd(ConditionRegister flag,
Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
Register addr_base, int semantics, bool cmpxchgx_hint = false,
@ -717,23 +788,6 @@ class MacroAssembler: public Assembler {
Register needle, jchar needleChar, Register tmp1, Register tmp2, bool is_byte);
void has_negatives(Register src, Register cnt, Register result, Register tmp1, Register tmp2);
// Intrinsics for non-CompactStrings
// Needle of length 1.
void string_indexof_1(Register result, Register haystack, Register haycnt,
Register needle, jchar needleChar,
Register tmp1, Register tmp2);
// General indexof, eventually with constant needle length.
void string_indexof(Register result, Register haystack, Register haycnt,
Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
Register result_reg, Register tmp_reg);
void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
Register tmp5_reg);
void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
Register tmp1_reg, Register tmp2_reg);
#endif
// Emitters for BigInteger.multiplyToLen intrinsic.

File diff suppressed because it is too large

View File

@ -1123,7 +1123,10 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R9_ARG7;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
// Don't try anything fancy if arrays don't have many elements.
__ li(tmp3, 0);
@ -1178,6 +1181,8 @@ class StubGenerator: public StubCodeGenerator {
__ andi_(R5_ARG3, R5_ARG3, 31);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_8);
// Use unrolled version for mass copying (copy 32 elements at a time)
// Load feeding store gets zero latency on Power6, however not on Power5.
@ -1193,7 +1198,44 @@ class StubGenerator: public StubCodeGenerator {
__ addi(R3_ARG1, R3_ARG1, 32);
__ addi(R4_ARG2, R4_ARG2, 32);
__ bdnz(l_8);
}
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_10);
// Use loop with VSX load/store instructions to
// copy 32 elements at a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
__ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
__ bdnz(l_10); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // VSX
} // FasterArrayCopy
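The copy shape emitted here (and repeated with different element counts in the variants further below) is full 32-element blocks followed by a scalar tail; a rough Java model, with names and the byte element type chosen only for illustration:
class BlockedCopyModel {
    // Illustrative only: 32 elements per iteration (two 16-byte lxvd2x/stxvd2x pairs), tail handled separately.
    static void copy(byte[] src, int sp, byte[] dst, int dp, int len) {
        int blocks = len >>> 5;                       // len / 32, the count loaded into CTR
        int tail   = len & 31;                        // leftover elements (andi_ above)
        for (int b = 0; b < blocks; b++) {
            System.arraycopy(src, sp, dst, dp, 32);
            sp += 32;
            dp += 32;
        }
        System.arraycopy(src, sp, dst, dp, tail);     // done element-wise in the real stub
    }
}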
__ bind(l_6);
@ -1557,7 +1599,10 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
Label l_1, l_2, l_3, l_4, l_5, l_6;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
// for short arrays, just do single element copy
__ li(tmp3, 0);
@ -1593,6 +1638,8 @@ class StubGenerator: public StubCodeGenerator {
__ andi_(R5_ARG3, R5_ARG3, 7);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_6);
// Use unrolled version for mass copying (copy 8 elements at a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@ -1608,7 +1655,44 @@ class StubGenerator: public StubCodeGenerator {
__ addi(R3_ARG1, R3_ARG1, 32);
__ addi(R4_ARG2, R4_ARG2, 32);
__ bdnz(l_6);
}
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_7);
// Use loop with VSX load/store instructions to
// copy 8 elements at a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
__ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
__ bdnz(l_7); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // VSX
} // FasterArrayCopy
// copy 1 element at a time
__ bind(l_2);
@ -1757,7 +1841,10 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R0;
Label l_1, l_2, l_3, l_4;
Label l_1, l_2, l_3, l_4, l_5;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
{ // FasterArrayCopy
__ cmpwi(CCR0, R5_ARG3, 3);
@ -1767,6 +1854,7 @@ class StubGenerator: public StubCodeGenerator {
__ andi_(R5_ARG3, R5_ARG3, 3);
__ mtctr(tmp1);
if (!VM_Version::has_vsx()) {
__ bind(l_4);
// Use unrolled version for mass copying (copy 4 elements at a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
@ -1782,7 +1870,44 @@ class StubGenerator: public StubCodeGenerator {
__ addi(R3_ARG1, R3_ARG1, 32);
__ addi(R4_ARG2, R4_ARG2, 32);
__ bdnz(l_4);
}
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch the data into the L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32 bytes. Not 16-byte aligned, as the
// loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_5);
// Use loop with VSX load/store instructions to
// copy 4 elements at a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load src
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst
__ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16
__ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32
__ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
__ bdnz(l_5); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // VSX
} // FasterArrayCopy
// copy 1 element at a time
__ bind(l_3);

View File

@ -845,9 +845,40 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_ent
__ b(continue_entry);
}
// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
//
// Kills Rmem_frame_size, Rscratch1.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
Label done;
assert_different_registers(Rmem_frame_size, Rscratch1);
__ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
BLOCK_COMMENT("stack_overflow_check_with_compare {");
__ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
__ ld(Rscratch1, thread_(stack_overflow_limit));
__ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
__ bgt(CCR0/*is_stack_overflow*/, done);
// The stack overflows. Load target address of the runtime stub and call it.
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
__ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
__ mtctr(Rscratch1);
// Restore caller_sp.
#ifdef ASSERT
__ ld(Rscratch1, 0, R1_SP);
__ ld(R0, 0, R21_sender_SP);
__ cmpd(CCR0, R0, Rscratch1);
__ asm_assert_eq("backlink", 0x547);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP);
__ bctr();
__ align(32, 12);
__ bind(done);
BLOCK_COMMENT("} stack_overflow_check_with_compare");
}
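The check reduces to one unsigned compare of the prospective frame base against the precomputed limit; a sketch with addresses modeled as longs (stackOverflowLimit stands in for JavaThread::stack_overflow_limit(), and the exception is of course raised via the runtime stub, not Java code):
class StackCheckModel {
    // Illustrative only.
    static void check(long sp, long frameSizeInBytes, long stackOverflowLimit) {
        long frameBase = sp - frameSizeInBytes;                            // sub(Rmem_frame_size, R1_SP, ...)
        if (Long.compareUnsigned(frameBase, stackOverflowLimit) <= 0) {    // cmpld/bgt above, inverted
            throw new StackOverflowError();                                // shadow zone absorbs the throw
        }
    }
}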
void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
@ -1014,10 +1045,10 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
// Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
if (!native_call) {
// --------------------------------------------------------------------------
// Stack overflow check
Label cont;
// Stack overflow check.
// Native calls don't need the stack size check since they have no
// expression stack and the arguments are already on the stack and
// we only add a handful of words to the stack.
__ add(R11_scratch1, parent_frame_resize, top_frame_size);
generate_stack_overflow_check(R11_scratch1, R12_scratch2);
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 SAP SE. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -4093,20 +4093,8 @@ void TemplateTable::monitorenter() {
__ lock_object(Rcurrent_monitor, Robj_to_lock);
// Check if there's enough space on the stack for the monitors after locking.
Label Lskip_stack_check;
// Optimization: If the monitors stack section is less than a std page size (4K) don't run
// the stack check. There should be enough shadow pages to fit that in.
__ ld(Rscratch3, 0, R1_SP);
__ sub(Rscratch3, Rscratch3, R26_monitor);
__ cmpdi(CCR0, Rscratch3, 4*K);
__ blt(CCR0, Lskip_stack_check);
DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
__ li(Rscratch1, 0);
__ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
__ align(32, 12);
__ bind(Lskip_stack_check);
// This emits a single store.
__ generate_stack_overflow_check(0);
// The bcp has already been incremented. Just need to dispatch to next instruction.
__ dispatch_next(vtos);

View File

@ -59,11 +59,11 @@ define_pd_global(intx, InlineSmallCode, 1500);
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
#define DEFAULT_STACK_SHADOW_PAGES (10 DEBUG_ONLY(+1))
#define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
#else
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
#define DEFAULT_STACK_SHADOW_PAGES (3 DEBUG_ONLY(+1))
#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
#endif // _LP64
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
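For scale, the new defaults roughly double the shadow zone; a small arithmetic sketch that assumes 4 KiB pages (the real size is DEFAULT_STACK_SHADOW_PAGES times the VM page size):
class ShadowZoneSize {
    // Illustrative arithmetic only; 4096 is an assumed page size.
    static long bytes(boolean lp64, boolean debug) {
        int pages = lp64 ? (debug ? 22 : 20) : (debug ? 8 : 6);
        return pages * 4096L;   // e.g. LP64 product: 20 * 4 KiB = 80 KiB, up from 10 * 4 KiB = 40 KiB
    }
}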

View File

@ -578,51 +578,39 @@ void TemplateInterpreterGenerator::lock_method() {
__ lock_object(Lmonitors, O0);
}
// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
Register Rscratch,
Register Rscratch2) {
Register Rscratch) {
const int page_size = os::vm_page_size();
Label after_frame_check;
assert_different_registers(Rframe_size, Rscratch, Rscratch2);
assert_different_registers(Rframe_size, Rscratch);
__ set(page_size, Rscratch);
__ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
// get the stack base, and in debug, verify it is non-zero
__ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
// Get the stack overflow limit, and in debug, verify it is non-zero.
__ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
Label base_not_zero;
__ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
__ stop("stack base is zero in generate_stack_overflow_check");
__ bind(base_not_zero);
Label limit_ok;
__ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
__ stop("stack overflow limit is zero in generate_stack_overflow_check");
__ bind(limit_ok);
#endif
// get the stack size, and in debug, verify it is non-zero
assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
__ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
Label size_not_zero;
__ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
__ stop("stack size is zero in generate_stack_overflow_check");
__ bind(size_not_zero);
#endif
// compute the beginning of the protected zone minus the requested frame size
__ sub( Rscratch, Rscratch2, Rscratch );
__ set(MAX2(JavaThread::stack_shadow_zone_size(), JavaThread::stack_guard_zone_size()), Rscratch2 );
__ add( Rscratch, Rscratch2, Rscratch );
// Add in the size of the frame (which is the same as subtracting it from the
// SP, which would take another register
__ add( Rscratch, Rframe_size, Rscratch );
// SP, which would take another register.
__ add(Rscratch, Rframe_size, Rscratch);
// the frame is greater than one page in size, so check against
// the bottom of the stack
// The frame is greater than one page in size, so check against
// the bottom of the stack.
__ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
// the stack will overflow, throw an exception
// The stack will overflow, throw an exception.
// Note that SP is restored to sender's sp (in the delay slot). This
// is necessary if the sender's frame is an extended compiled frame
@ -636,8 +624,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
__ jump_to(stub, Rscratch);
__ delayed()->mov(O5_savedSP, SP);
// if you get to here, then there is enough stack space
__ bind( after_frame_check );
// If you get to here, then there is enough stack space.
__ bind(after_frame_check);
}
@ -821,40 +809,44 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ add( Gframe_size, extra_space, Gframe_size);
__ round_to( Gframe_size, WordsPerLong );
__ sll( Gframe_size, LogBytesPerWord, Gframe_size );
// Native calls don't need the stack size check since they have no
// expression stack and the arguments are already on the stack and
// we only add a handful of words to the stack.
} else {
//
// Compute number of locals in method apart from incoming parameters
//
const Address size_of_locals (Otmp1, ConstMethod::size_of_locals_offset());
__ ld_ptr( constMethod, Otmp1 );
__ lduh( size_of_locals, Otmp1 );
__ sub( Otmp1, Glocals_size, Glocals_size );
__ round_to( Glocals_size, WordsPerLong );
__ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
__ ld_ptr(constMethod, Otmp1);
__ lduh(size_of_locals, Otmp1);
__ sub(Otmp1, Glocals_size, Glocals_size);
__ round_to(Glocals_size, WordsPerLong);
__ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// See if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining.
// Frame_size = (max_stack + extra_space) * BytesPerWord;
__ ld_ptr( constMethod, Gframe_size );
__ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
__ add( Gframe_size, extra_space, Gframe_size );
__ round_to( Gframe_size, WordsPerLong );
__ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
__ ld_ptr(constMethod, Gframe_size);
__ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
__ add(Gframe_size, extra_space, Gframe_size);
__ round_to(Gframe_size, WordsPerLong);
__ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);
// Add in java locals size for stack overflow check only
__ add( Gframe_size, Glocals_size, Gframe_size );
__ add(Gframe_size, Glocals_size, Gframe_size);
const Register Otmp2 = O4;
assert_different_registers(Otmp1, Otmp2, O5_savedSP);
generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
generate_stack_overflow_check(Gframe_size, Otmp1);
__ sub( Gframe_size, Glocals_size, Gframe_size);
__ sub(Gframe_size, Glocals_size, Gframe_size);
//
// bump SP to accommodate the extra locals
//
__ sub( SP, Glocals_size, SP );
__ sub(SP, Glocals_size, SP);
}
//

View File

@ -1173,6 +1173,23 @@ void Assembler::addl(Address dst, int32_t imm32) {
emit_arith_operand(0x81, rax, dst, imm32);
}
void Assembler::addb(Address dst, int imm8) {
InstructionMark im(this);
prefix(dst);
emit_int8((unsigned char)0x80);
emit_operand(rax, dst, 1);
emit_int8(imm8);
}
void Assembler::addw(Address dst, int imm16) {
InstructionMark im(this);
emit_int8(0x66);
prefix(dst);
emit_int8((unsigned char)0x81);
emit_operand(rax, dst, 2);
emit_int16(imm16);
}
void Assembler::addl(Address dst, Register src) {
InstructionMark im(this);
prefix(dst, src);
@ -4567,6 +4584,23 @@ void Assembler::xabort(int8_t imm8) {
emit_int8((unsigned char)(imm8 & 0xFF));
}
void Assembler::xaddb(Address dst, Register src) {
InstructionMark im(this);
prefix(dst, src, true);
emit_int8(0x0F);
emit_int8((unsigned char)0xC0);
emit_operand(src, dst);
}
void Assembler::xaddw(Address dst, Register src) {
InstructionMark im(this);
emit_int8(0x66);
prefix(dst, src);
emit_int8(0x0F);
emit_int8((unsigned char)0xC1);
emit_operand(src, dst);
}
void Assembler::xaddl(Address dst, Register src) {
InstructionMark im(this);
prefix(dst, src);
@ -4593,6 +4627,21 @@ void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
}
}
void Assembler::xchgb(Register dst, Address src) { // xchg
InstructionMark im(this);
prefix(src, dst, true);
emit_int8((unsigned char)0x86);
emit_operand(dst, src);
}
void Assembler::xchgw(Register dst, Address src) { // xchg
InstructionMark im(this);
emit_int8(0x66);
prefix(src, dst);
emit_int8((unsigned char)0x87);
emit_operand(dst, src);
}
void Assembler::xchgl(Register dst, Address src) { // xchg
InstructionMark im(this);
prefix(src, dst);

View File

@ -881,6 +881,9 @@ private:
void adcq(Register dst, Address src);
void adcq(Register dst, Register src);
void addb(Address dst, int imm8);
void addw(Address dst, int imm16);
void addl(Address dst, int32_t imm32);
void addl(Address dst, Register src);
void addl(Register dst, int32_t imm32);
@ -1816,12 +1819,15 @@ private:
void xabort(int8_t imm8);
void xaddb(Address dst, Register src);
void xaddw(Address dst, Register src);
void xaddl(Address dst, Register src);
void xaddq(Address dst, Register src);
void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
void xchgb(Register reg, Address adr);
void xchgw(Register reg, Address adr);
void xchgl(Register reg, Address adr);
void xchgl(Register dst, Register src);

View File

@ -473,7 +473,11 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
__ jmp(do_continue, relocInfo::none);
}
// See if we've got enough room on the stack for locals plus overhead.
// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
@ -518,40 +522,26 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
__ get_thread(thread);
#endif
const Address stack_base(thread, Thread::stack_base_offset());
const Address stack_size(thread, Thread::stack_size_offset());
const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());
// locals + overhead, in bytes
__ mov(rax, rdx);
__ shlptr(rax, Interpreter::logStackElementSize); // 2 slots per parameter.
__ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
__ addptr(rax, overhead_size);
#ifdef ASSERT
Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero
__ cmpptr(stack_base, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero");
__ bind(stack_base_okay);
// verify that thread stack size is non-zero
__ cmpptr(stack_size, 0);
__ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero");
__ bind(stack_size_okay);
Label limit_okay;
// Verify that thread stack overflow limit is non-zero.
__ cmpptr(stack_limit, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, limit_okay);
__ stop("stack overflow limit is zero");
__ bind(limit_okay);
#endif
// Add stack base to locals and subtract stack size
__ addptr(rax, stack_base);
__ subptr(rax, stack_size);
// Add locals/frame size to stack limit.
__ addptr(rax, stack_limit);
// Use the bigger size for banging.
const int max_bang_size = (int)MAX2(JavaThread::stack_shadow_zone_size(),
JavaThread::stack_guard_zone_size());
// add in the red and yellow zone sizes
__ addptr(rax, max_bang_size);
// check against the current stack bottom
// Check against the current stack bottom.
__ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check_pop);
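The net change, modeled with addresses as longs (sketch only): the old code rebuilt the limit from stack base, stack size and the larger of the shadow/guard zones on every entry, while the new code compares directly against the precomputed overflow limit.
class FrameCheckModel {
    // Illustrative only; both return true when the new frame fits.
    static boolean fitsOld(long sp, long frameSize, long stackBase, long stackSize, long bangSize) {
        long limit = stackBase - stackSize + bangSize;                        // recomputed on each check
        return Long.compareUnsigned(sp, frameSize + limit) > 0;
    }
    static boolean fitsNew(long sp, long frameSize, long stackOverflowLimit) {
        return Long.compareUnsigned(sp, frameSize + stackOverflowLimit) > 0;  // addptr + cmpptr/jcc(above)
    }
}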
@ -782,8 +772,6 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
return NULL;
}
// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception. Windows might need this to map the shadow pages though.
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Quick & dirty stack overflow checking: bang the stack & handle trap.
// Note that we do the banging after the frame is setup, since the exception
@ -945,7 +933,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
#ifndef _LP64
__ shlptr(t, Interpreter::logStackElementSize);
__ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
__ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics

View File

@ -2127,6 +2127,31 @@ encode %{
emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgb(eSIRegP mem_ptr) %{
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
// CMPXCHGB [Eptr]
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xB0);
emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgw(eSIRegP mem_ptr) %{
// [Lock]
if( os::is_MP() )
emit_opcode(cbuf,0xF0);
// 16-bit mode
emit_opcode(cbuf, 0x66);
// CMPXCHGW [Eptr]
emit_opcode(cbuf,0x0F);
emit_opcode(cbuf,0xB1);
emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_flags_ne_to_boolean( iRegI res ) %{
int res_encoding = $res$$reg;
@ -7262,6 +7287,34 @@ instruct compareAndSwapP( rRegI res, pRegP mem_ptr, eAXRegP oldval, eCXRegP new
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapB( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr ) %{
match(Set res (CompareAndSwapB mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapB mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "CMPXCHGB [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
"JNE,s fail\n\t"
"MOV $res,1\n"
"fail:" %}
ins_encode( enc_cmpxchgb(mem_ptr),
enc_flags_ne_to_boolean(res) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapS( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr ) %{
match(Set res (CompareAndSwapS mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapS mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "CMPXCHGW [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
"JNE,s fail\n\t"
"MOV $res,1\n"
"fail:" %}
ins_encode( enc_cmpxchgw(mem_ptr),
enc_flags_ne_to_boolean(res) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval)));
@ -7292,6 +7345,22 @@ instruct compareAndExchangeP( pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFl
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeB( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set oldval (CompareAndExchangeB mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "CMPXCHGB [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchgb(mem_ptr) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeS( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set oldval (CompareAndExchangeS mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "CMPXCHGW [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchgw(mem_ptr) );
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeI( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{
match(Set oldval (CompareAndExchangeI mem_ptr (Binary oldval newval)));
effect(KILL cr);
@ -7300,6 +7369,53 @@ instruct compareAndExchangeI( pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFl
ins_pipe( pipe_cmpxchg );
%}
instruct xaddB_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddB mem add));
effect(KILL cr);
format %{ "ADDB [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ addb($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
%}
// Important to match to xRegI: only 8-bit regs.
instruct xaddB( memory mem, xRegI newval, eFlagsReg cr) %{
match(Set newval (GetAndAddB mem newval));
effect(KILL cr);
format %{ "XADDB [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ xaddb($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddS_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddS mem add));
effect(KILL cr);
format %{ "ADDS [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ addw($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddS( memory mem, rRegI newval, eFlagsReg cr) %{
match(Set newval (GetAndAddS mem newval));
effect(KILL cr);
format %{ "XADDS [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ xaddw($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
%}
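The _no_res variants cover callers that discard the old value, letting the matcher emit a locked add instead of xadd; a hedged Java illustration of the two shapes (field and VarHandle names are invented, and whether C2 actually matches these rules depends on the compilation):
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

class XaddShapes {
    volatile byte count;
    static final VarHandle COUNT;
    static {
        try {
            COUNT = MethodHandles.lookup().findVarHandle(XaddShapes.class, "count", byte.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }
    void bump() {
        COUNT.getAndAdd(this, (byte) 1);                 // result unused -> candidate for xaddB_no_res (lock addb)
    }
    byte bumpReturningOld() {
        return (byte) COUNT.getAndAdd(this, (byte) 1);   // result used -> candidate for xaddB (lock xaddb)
    }
}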
instruct xaddI_no_res( memory mem, Universe dummy, immI add, eFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem add));
@ -7323,6 +7439,25 @@ instruct xaddI( memory mem, rRegI newval, eFlagsReg cr) %{
ins_pipe( pipe_cmpxchg );
%}
// Important to match to xRegI: only 8-bit regs.
instruct xchgB( memory mem, xRegI newval) %{
match(Set newval (GetAndSetB mem newval));
format %{ "XCHGB $newval,[$mem]" %}
ins_encode %{
__ xchgb($newval$$Register, $mem$$Address);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xchgS( memory mem, rRegI newval) %{
match(Set newval (GetAndSetS mem newval));
format %{ "XCHGW $newval,[$mem]" %}
ins_encode %{
__ xchgw($newval$$Register, $mem$$Address);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xchgI( memory mem, rRegI newval) %{
match(Set newval (GetAndSetI mem newval));
format %{ "XCHGL $newval,[$mem]" %}

View File

@ -7340,6 +7340,54 @@ instruct compareAndSwapI(rRegI res,
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapB(rRegI res,
memory mem_ptr,
rax_RegI oldval, rRegI newval,
rFlagsReg cr)
%{
match(Set res (CompareAndSwapB mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapB mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgb $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
"sete $res\n\t"
"movzbl $res, $res" %}
opcode(0x0F, 0xB0);
ins_encode(lock_prefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr),
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
REX_reg_breg(res, res), // movzbl
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapS(rRegI res,
memory mem_ptr,
rax_RegI oldval, rRegI newval,
rFlagsReg cr)
%{
match(Set res (CompareAndSwapS mem_ptr (Binary oldval newval)));
match(Set res (WeakCompareAndSwapS mem_ptr (Binary oldval newval)));
effect(KILL cr, KILL oldval);
format %{ "cmpxchgw $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
"sete $res\n\t"
"movzbl $res, $res" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
SizePrefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr),
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
REX_reg_breg(res, res), // movzbl
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndSwapN(rRegI res,
memory mem_ptr,
@ -7364,6 +7412,45 @@ instruct compareAndSwapN(rRegI res,
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeB(
memory mem_ptr,
rax_RegI oldval, rRegI newval,
rFlagsReg cr)
%{
match(Set oldval (CompareAndExchangeB mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgb $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB0);
ins_encode(lock_prefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeS(
memory mem_ptr,
rax_RegI oldval, rRegI newval,
rFlagsReg cr)
%{
match(Set oldval (CompareAndExchangeS mem_ptr (Binary oldval newval)));
effect(KILL cr);
format %{ "cmpxchgw $mem_ptr,$newval\t# "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
SizePrefix,
REX_reg_mem(newval, mem_ptr),
OpcP, OpcS,
reg_mem(newval, mem_ptr) // lock cmpxchg
);
ins_pipe( pipe_cmpxchg );
%}
instruct compareAndExchangeI(
memory mem_ptr,
rax_RegI oldval, rRegI newval,
@ -7441,6 +7528,52 @@ instruct compareAndExchangeP(
ins_pipe( pipe_cmpxchg );
%}
instruct xaddB_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddB mem add));
effect(KILL cr);
format %{ "ADDB [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ addb($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddB( memory mem, rRegI newval, rFlagsReg cr) %{
match(Set newval (GetAndAddB mem newval));
effect(KILL cr);
format %{ "XADDB [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ xaddb($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddS_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddS mem add));
effect(KILL cr);
format %{ "ADDW [$mem],$add" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ addw($mem$$Address, $add$$constant);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddS( memory mem, rRegI newval, rFlagsReg cr) %{
match(Set newval (GetAndAddS mem newval));
effect(KILL cr);
format %{ "XADDW [$mem],$newval" %}
ins_encode %{
if (os::is_MP()) { __ lock(); }
__ xaddw($mem$$Address, $newval$$Register);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem add));
@ -7487,6 +7620,24 @@ instruct xaddL( memory mem, rRegL newval, rFlagsReg cr) %{
ins_pipe( pipe_cmpxchg );
%}
instruct xchgB( memory mem, rRegI newval) %{
match(Set newval (GetAndSetB mem newval));
format %{ "XCHGB $newval,[$mem]" %}
ins_encode %{
__ xchgb($newval$$Register, $mem$$Address);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xchgS( memory mem, rRegI newval) %{
match(Set newval (GetAndSetS mem newval));
format %{ "XCHGW $newval,[$mem]" %}
ins_encode %{
__ xchgw($newval$$Register, $mem$$Address);
%}
ins_pipe( pipe_cmpxchg );
%}
instruct xchgI( memory mem, rRegI newval) %{
match(Set newval (GetAndSetI mem newval));
format %{ "XCHGL $newval,[$mem]" %}

View File

@ -28,6 +28,7 @@ import java.util.EnumSet;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
@ -84,13 +85,13 @@ public class AArch64 extends Architecture {
public static final Register lr = r30;
// @formatter:off
public static final Register[] cpuRegisters = {
public static final RegisterArray cpuRegisters = new RegisterArray(
r0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, r13, r14, r15,
r16, r17, r18, r19, r20, r21, r22, r23,
r24, r25, r26, r27, r28, r29, r30, r31,
zr, sp
};
);
// @formatter:on
public static final RegisterCategory SIMD = new RegisterCategory("SIMD");
@ -130,16 +131,16 @@ public class AArch64 extends Architecture {
public static final Register v31 = new Register(65, 31, "v31", SIMD);
// @formatter:off
public static final Register[] simdRegisters = {
public static final RegisterArray simdRegisters = new RegisterArray(
v0, v1, v2, v3, v4, v5, v6, v7,
v8, v9, v10, v11, v12, v13, v14, v15,
v16, v17, v18, v19, v20, v21, v22, v23,
v24, v25, v26, v27, v28, v29, v30, v31
};
);
// @formatter:on
// @formatter:off
public static final Register[] allRegisters = {
public static final RegisterArray allRegisters = new RegisterArray(
r0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, r13, r14, r15,
r16, r17, r18, r19, r20, r21, r22, r23,
@ -150,7 +151,7 @@ public class AArch64 extends Architecture {
v8, v9, v10, v11, v12, v13, v14, v15,
v16, v17, v18, v19, v20, v21, v22, v23,
v24, v25, v26, v27, v28, v29, v30, v31
};
);
// @formatter:on
/**

View File

@ -33,6 +33,7 @@ import java.util.EnumSet;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
@ -131,14 +132,14 @@ public class AMD64 extends Architecture {
public static final Register k6 = new Register(54, 6, "k6", MASK);
public static final Register k7 = new Register(55, 7, "k7", MASK);
public static final Register[] valueRegistersSSE = {
public static final RegisterArray valueRegistersSSE = new RegisterArray(
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15
};
);
public static final Register[] valueRegistersAVX512 = {
public static final RegisterArray valueRegistersAVX512 = new RegisterArray(
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
@ -146,14 +147,14 @@ public class AMD64 extends Architecture {
xmm16, xmm17, xmm18, xmm19, xmm20, xmm21, xmm22, xmm23,
xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31,
k0, k1, k2, k3, k4, k5, k6, k7
};
);
/**
* Register used to construct an instruction-relative address.
*/
public static final Register rip = new Register(56, -1, "rip", SPECIAL);
public static final Register[] allRegisters = {
public static final RegisterArray allRegisters = new RegisterArray(
rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
r8, r9, r10, r11, r12, r13, r14, r15,
xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
@ -162,7 +163,7 @@ public class AMD64 extends Architecture {
xmm24, xmm25, xmm26, xmm27, xmm28, xmm29, xmm30, xmm31,
k0, k1, k2, k3, k4, k5, k6, k7,
rip
};
);
// @formatter:on
@ -245,7 +246,7 @@ public class AMD64 extends Architecture {
}
@Override
public Register[] getAvailableValueRegisters() {
public RegisterArray getAvailableValueRegisters() {
if (features.contains(CPUFeature.AVX512F)) {
return valueRegistersAVX512;
} else {

View File

@ -23,7 +23,6 @@
package jdk.vm.ci.code;
import java.nio.ByteOrder;
import java.util.Arrays;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.meta.JavaKind;
@ -46,10 +45,10 @@ public abstract class Architecture {
private final String name;
/**
* Array of all available registers on this architecture. The index of each register in this
* array is equal to its {@linkplain Register#number number}.
* List of all available registers on this architecture. The index of each register in this list
* is equal to its {@linkplain Register#number number}.
*/
private final Register[] registers;
private final RegisterArray registers;
/**
* The byte ordering can be either little or big endian.
@ -78,7 +77,8 @@ public abstract class Architecture {
*/
private final int returnAddressSize;
protected Architecture(String name, PlatformKind wordKind, ByteOrder byteOrder, boolean unalignedMemoryAccess, Register[] registers, int implicitMemoryBarriers, int nativeCallDisplacementOffset,
protected Architecture(String name, PlatformKind wordKind, ByteOrder byteOrder, boolean unalignedMemoryAccess, RegisterArray registers, int implicitMemoryBarriers,
int nativeCallDisplacementOffset,
int returnAddressSize) {
this.name = name;
this.registers = registers;
@ -120,20 +120,20 @@ public abstract class Architecture {
}
/**
* Gets an array of all registers that exist on this architecture. This contains all registers
* Gets the list of all registers that exist on this architecture. This contains all registers
* that exist in the specification of this architecture. Not all of them may be available on
* this particular architecture instance. The index of each register in this array is equal to
* this particular architecture instance. The index of each register in this list is equal to
* its {@linkplain Register#number number}.
*/
public Register[] getRegisters() {
return registers.clone();
public RegisterArray getRegisters() {
return registers;
}
/**
* Gets an array of all registers available for storing values on this architecture. This may be
* a subset of {@link #getRegisters()}, depending on the capabilities of this particular CPU.
* Gets a list of all registers available for storing values on this architecture. This may be a
* subset of {@link #getRegisters()}, depending on the capabilities of this particular CPU.
*/
public Register[] getAvailableValueRegisters() {
public RegisterArray getAvailableValueRegisters() {
return getRegisters();
}
@ -206,7 +206,7 @@ public abstract class Architecture {
assert this.byteOrder.equals(that.byteOrder);
assert this.implicitMemoryBarriers == that.implicitMemoryBarriers;
assert this.machineCodeCallDisplacementOffset == that.machineCodeCallDisplacementOffset;
assert Arrays.equals(this.registers, that.registers);
assert this.registers.equals(that.registers);
assert this.returnAddressSize == that.returnAddressSize;
assert this.unalignedMemoryAccess == that.unalignedMemoryAccess;
assert this.wordKind == that.wordKind;

View File

@ -34,7 +34,7 @@ import jdk.vm.ci.meta.Value;
* where to find the local variables, operand stack values and locked objects of the bytecode
* frame(s).
*/
public class BytecodeFrame extends BytecodePosition {
public final class BytecodeFrame extends BytecodePosition {
/**
* An array of values representing how to reconstruct the state of the Java frame. This is array
@ -65,14 +65,18 @@ public class BytecodeFrame extends BytecodePosition {
* <p>
* Note that the number of locals and the number of stack slots may be smaller than the maximum
* number of locals and stack slots as specified in the compiled method.
*
* This field is intentionally exposed as a mutable array that a compiler may modify (e.g.
* during register allocation).
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "field is intentionally mutable")//
public final JavaValue[] values;
/**
* An array describing the Java kind of the {@link #values}. It records a kind for the locals
* and the operand stack.
* An array describing the Java kinds in {@link #values}. It records a kind for the locals and
* the operand stack.
*/
public final JavaKind[] slotKinds;
private final JavaKind[] slotKinds;
/**
* The number of locals in the values array.
@ -99,8 +103,8 @@ public class BytecodeFrame extends BytecodePosition {
public final boolean rethrowException;
/**
* Specifies if this object represents a frame state in the middle of executing a call. If
* true, the arguments to the call have been popped from the stack and the return value (for a
* Specifies if this object represents a frame state in the middle of executing a call. If true,
* the arguments to the call have been popped from the stack and the return value (for a
* non-void call) has not yet been pushed.
*/
public final boolean duringCall;
@ -178,11 +182,14 @@ public class BytecodeFrame extends BytecodePosition {
* @param bci a BCI within the method
* @param rethrowException specifies if the VM should re-throw the pending exception when
* deopt'ing using this frame
* @param values the frame state {@link #values}
* @param values the frame state {@link #values}.
* @param slotKinds the kinds in {@code values}. This array is now owned by this object and must
* not be mutated by the caller.
* @param numLocals the number of local variables
* @param numStack the depth of the stack
* @param numLocks the number of locked objects
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `slotKinds`")
public BytecodeFrame(BytecodeFrame caller, ResolvedJavaMethod method, int bci, boolean rethrowException, boolean duringCall, JavaValue[] values, JavaKind[] slotKinds, int numLocals, int numStack,
int numLocks) {
super(caller, method, bci);
@ -218,13 +225,45 @@ public class BytecodeFrame extends BytecodePosition {
return true;
}
/**
* Gets the kind of a local variable.
*
* @param i the local variable to query
* @return the kind of local variable {@code i}
* @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.numLocals}
*/
public JavaKind getLocalValueKind(int i) {
if (i < 0 || i >= numLocals) {
throw new IndexOutOfBoundsException();
}
return slotKinds[i];
}
/**
* Gets the kind of a stack slot.
*
* @param i the local variable to query
* @return the kind of stack slot {@code i}
* @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.numStack}
*/
public JavaKind getStackValueKind(int i) {
if (i < 0 || i >= numStack) {
throw new IndexOutOfBoundsException();
}
return slotKinds[i + numLocals];
}
/**
* Gets the value representing the specified local variable.
*
* @param i the local variable index
* @return the value that can be used to reconstruct the local's current value
* @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.numLocals}
*/
public JavaValue getLocalValue(int i) {
if (i < 0 || i >= numLocals) {
throw new IndexOutOfBoundsException();
}
return values[i];
}
@ -233,8 +272,12 @@ public class BytecodeFrame extends BytecodePosition {
*
* @param i the stack index
* @return the value that can be used to reconstruct the stack slot's current value
* @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.numStack}
*/
public JavaValue getStackValue(int i) {
if (i < 0 || i >= numStack) {
throw new IndexOutOfBoundsException();
}
return values[i + numLocals];
}
@ -243,8 +286,12 @@ public class BytecodeFrame extends BytecodePosition {
*
* @param i the lock index
* @return the value that can be used to reconstruct the lock's current value
* @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.numLocks}
*/
public JavaValue getLockValue(int i) {
if (i < 0 || i >= numLocks) {
throw new IndexOutOfBoundsException();
}
return values[i + numLocals + numStack];
}
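A short usage sketch for the new bounds-checked accessors (the frame is assumed to come from compiler-produced DebugInfo; FrameDump is an invented helper):
import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaValue;

class FrameDump {
    static void dumpLocals(BytecodeFrame frame) {
        for (int i = 0; i < frame.numLocals; i++) {
            JavaKind kind = frame.getLocalValueKind(i);   // replaces direct reads of the now-private slotKinds
            JavaValue value = frame.getLocalValue(i);     // throws IndexOutOfBoundsException when out of range
            System.out.println(i + ": " + kind + " = " + value);
        }
    }
}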
@ -257,6 +304,11 @@ public class BytecodeFrame extends BytecodePosition {
return (BytecodeFrame) getCaller();
}
@Override
public int hashCode() {
return (numLocals + 1) ^ (numStack + 11) ^ (numLocks + 7);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {

View File

@ -100,6 +100,7 @@ public class CallingConvention {
/**
* Gets the locations required for the arguments.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "FB false positive")
public AllocatableValue[] getArguments() {
if (argumentLocations.length == 0) {
return argumentLocations;

View File

@ -48,8 +48,10 @@ public final class DebugInfo {
*
* @param codePos the {@linkplain BytecodePosition code position} or {@linkplain BytecodeFrame
* frame} info
* @param virtualObjectMapping the mapping of {@link VirtualObject}s to their real values
* @param virtualObjectMapping the mapping of {@link VirtualObject}s to their real values. This
* array is now owned by this object and must not be mutated by the caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `virtualObjectMapping`")
public DebugInfo(BytecodePosition codePos, VirtualObject[] virtualObjectMapping) {
this.bytecodePosition = codePos;
this.virtualObjectMapping = virtualObjectMapping;

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.code;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
* An immutable ordered list of registers. Only required because Java lacks immutable arrays.
*/
public final class RegisterArray implements Iterable<Register> {
private final Register[] registers;
private int hash;
public RegisterArray(Register... registers) {
this.registers = registers;
}
public RegisterArray(Collection<Register> registers) {
this.registers = registers.toArray(new Register[registers.size()]);
}
/**
* Gets the number of registers.
*/
public int size() {
return registers.length;
}
/**
* Gets the register at a given index.
*
* @param index the index of the register to retrieve
*/
public Register get(int index) {
return registers[index];
}
public void addTo(Collection<Register> collection) {
collection.addAll(Arrays.asList(registers));
}
/**
* Gets an immutable view of the registers as a list.
*/
public List<Register> asList() {
return Collections.unmodifiableList(Arrays.asList(registers));
}
/**
* Gets a copy of the registers as an array.
*/
public Register[] toArray() {
return registers.clone();
}
public Iterator<Register> iterator() {
return Arrays.asList(registers).iterator();
}
@Override
public int hashCode() {
if (hash == 0 && registers.length > 0) {
hash = Arrays.hashCode(registers);
}
return hash;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof RegisterArray) {
return Arrays.equals(registers, ((RegisterArray) obj).registers);
}
return false;
}
@Override
public String toString() {
return Arrays.toString(registers);
}
}
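A short usage sketch for RegisterArray as defined above (r0 and r1 stand in for whatever Register constants a backend defines):
RegisterArray params = new RegisterArray(r0, r1);
int count = params.size();                 // 2
Register first = params.get(0);            // r0
List<Register> view = params.asList();     // unmodifiable view over the same registers
Register[] copy = params.toArray();        // defensive copy; mutating it does not affect params
for (Register r : params) {                // Iterable support
    System.out.println(r);
}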

View File

@ -23,6 +23,8 @@
package jdk.vm.ci.code;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* A collection of register attributes. The specific attribute values for a register may be local to
@ -53,13 +55,14 @@ public class RegisterAttributes {
* @return an array whose length is the max register number in {@code registers} plus 1. An
* element at index i holds the attributes of the register whose number is i.
*/
public static RegisterAttributes[] createMap(RegisterConfig registerConfig, Register[] registers) {
RegisterAttributes[] map = new RegisterAttributes[registers.length];
public static RegisterAttributes[] createMap(RegisterConfig registerConfig, RegisterArray registers) {
RegisterAttributes[] map = new RegisterAttributes[registers.size()];
List<Register> callerSaveRegisters = registerConfig.getCallerSaveRegisters().asList();
List<Register> calleeSaveRegisters = registerConfig.getCalleeSaveRegisters() == null ? Collections.emptyList() : registerConfig.getCalleeSaveRegisters().asList();
List<Register> allocatableRegisters = registerConfig.getAllocatableRegisters().asList();
for (Register reg : registers) {
if (reg != null) {
Register[] csr = registerConfig.getCalleeSaveRegisters();
RegisterAttributes attr = new RegisterAttributes(Arrays.asList(registerConfig.getCallerSaveRegisters()).contains(reg), csr == null ? false : Arrays.asList(csr).contains(reg),
Arrays.asList(registerConfig.getAllocatableRegisters()).contains(reg));
RegisterAttributes attr = new RegisterAttributes(callerSaveRegisters.contains(reg), calleeSaveRegisters.contains(reg), allocatableRegisters.contains(reg));
if (map.length <= reg.number) {
map = Arrays.copyOf(map, reg.number + 1);
}
@ -75,23 +78,24 @@ public class RegisterAttributes {
}
/**
* @return Denotes a register that is available for use by a register allocator.
* @return {@code true} if the register is available for use by a register allocator, otherwise
* {@code false}
*/
public boolean isAllocatable() {
return allocatable;
}
/**
* @return Denotes a register whose value preservation (if required) across a call is the
* responsibility of the callee.
* @return {@code true} if the register's value preservation (if required) across a call is
* the responsibility of the callee, otherwise {@code false}
*/
public boolean isCalleeSave() {
return calleeSave;
}
/**
* @return Denotes a register whose value preservation (if required) across a call is the
* responsibility of the caller.
* @return {@code true} if the register's value preservation (if required) across a call is
* the responsibility of the caller, otherwise {@code false}
*/
public boolean isCallerSave() {
return callerSave;
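A sketch of how the map produced by createMap above might be consumed; regConfig and arch are assumed to be an existing RegisterConfig and Architecture:
RegisterAttributes[] map = RegisterAttributes.createMap(regConfig, arch.getAvailableValueRegisters());
Register reg = arch.getAvailableValueRegisters().get(0);
boolean canAllocate = map[reg.number].isAllocatable();        // table is indexed by register number
boolean mustSaveAroundCalls = map[reg.number].isCallerSave();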

View File

@ -71,7 +71,7 @@ public interface RegisterConfig {
* @return the ordered set of registers that may be used to pass parameters in a call conforming
* to {@code type}
*/
Register[] getCallingConventionRegisters(Type type, JavaKind kind);
RegisterArray getCallingConventionRegisters(Type type, JavaKind kind);
/**
* Gets the set of all registers that might be used by the register allocator.
@ -80,23 +80,23 @@ public interface RegisterConfig {
* {@link RegisterAllocationConfig#getAllocatableRegisters()}
*/
@SuppressWarnings("javadoc")
Register[] getAllocatableRegisters();
RegisterArray getAllocatableRegisters();
/**
* Filters a set of registers and returns only those that can be used by the register allocator
* for a value of a particular kind.
*/
Register[] filterAllocatableRegisters(PlatformKind kind, Register[] registers);
RegisterArray filterAllocatableRegisters(PlatformKind kind, RegisterArray registers);
/**
* Gets the registers whose values must be preserved by a method across any call it makes.
*/
Register[] getCallerSaveRegisters();
RegisterArray getCallerSaveRegisters();
/**
* Gets the registers whose values must be preserved by the callee.
*/
Register[] getCalleeSaveRegisters();
RegisterArray getCalleeSaveRegisters();
/**
* Gets a map from register {@linkplain Register#number numbers} to register

View File

@ -50,6 +50,7 @@ public final class RegisterSaveLayout {
* @param registers the keys in the map
* @param slots frame slot index for each register in {@code registers}
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `registers` and `slots`")
public RegisterSaveLayout(Register[] registers, int[] slots) {
assert registers.length == slots.length;
this.registers = registers;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -20,32 +20,21 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspotvmconfig;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
package jdk.vm.ci.code;
/**
* Refers to a C++ constant in the VM.
* Used to suppress <a href="http://findbugs.sourceforge.net">FindBugs</a> warnings.
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface HotSpotVMConstant {
@interface SuppressFBWarnings {
/**
* The set of FindBugs
* <a href="http://findbugs.sourceforge.net/bugDescriptions.html">warnings</a> that are to be
* suppressed in the annotated element. The value can be a bug category, kind or pattern.
*/
String[] value();
/**
* Returns the name of the constant.
*
* @return name of constant
* Reason why the warning is suppressed.
*/
String name();
/**
* List of architectures where this constant is required. Names are derived from
* {@link HotSpotVMConfig#getHostArchitectureName()}. An empty list implies that the constant is
* required on all architectures.
*/
@SuppressWarnings("javadoc")
String[] archs() default {};
String justification();
}

View File

@ -119,17 +119,20 @@ public final class VirtualObject implements JavaValue {
}
/**
* Returns an array containing all the values to be stored into the object when it is recreated.
* Returns the array containing all the values to be stored into the object when it is
* recreated. This field is intentionally exposed as a mutable array that a compiler may modify
* (e.g. during register allocation).
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "`values` is intentional mutable")//
public JavaValue[] getValues() {
return values;
}
/**
* Returns an array containing the Java kind of all values in the object.
* Returns the kind of the value at {@code index}.
*/
public JavaKind[] getSlotKinds() {
return slotKinds;
public JavaKind getSlotKind(int index) {
return slotKinds[index];
}
/**
@ -145,9 +148,13 @@ public final class VirtualObject implements JavaValue {
*
* @param values an array containing all the values to be stored into the object when it is
* recreated.
* @param slotKinds an array containing the Java kinds of the values.
* @param slotKinds an array containing the Java kinds of the values. This must have the same
* length as {@code values}. This array is now owned by this object and must not be
* mutated by the caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `slotKinds`")
public void setValues(JavaValue[] values, JavaKind[] slotKinds) {
assert values.length == slotKinds.length;
this.values = values;
this.slotKinds = slotKinds;
}
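A sketch of the setValues contract above; vo is assumed to be an already materialized VirtualObject and the two JavaValue entries are illustrative:
JavaValue[] values = {someIntValue, someObjectValue};   // assumed pre-existing JavaValues
JavaKind[] kinds = {JavaKind.Int, JavaKind.Object};     // must match values.length
vo.setValues(values, kinds);
// Both arrays now belong to vo: a compiler may later rewrite entries of
// vo.getValues() in place, and vo.getSlotKind(1) reports JavaKind.Object.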

View File

@ -30,8 +30,8 @@ import java.util.concurrent.atomic.AtomicInteger;
* {@code "jvmci.inittimer"} system property to {@code "true"}.
*/
public final class InitTimer implements AutoCloseable {
final String name;
final long start;
private final String name;
private final long start;
private InitTimer(String name) {
int n = nesting.getAndIncrement();
@ -76,5 +76,5 @@ public final class InitTimer implements AutoCloseable {
/**
* Used to assert the invariant that all related initialization happens on the same thread.
*/
public static Thread initializingThread;
static Thread initializingThread;
}

View File

@ -38,24 +38,23 @@ import jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotStackIntrospection;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.runtime.JVMCIBackend;
public class AArch64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFactory {
protected EnumSet<AArch64.CPUFeature> computeFeatures(@SuppressWarnings("unused") HotSpotVMConfig config) {
protected EnumSet<AArch64.CPUFeature> computeFeatures(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
// Configure the feature set using the HotSpot flag settings.
EnumSet<AArch64.CPUFeature> features = EnumSet.noneOf(AArch64.CPUFeature.class);
return features;
}
protected EnumSet<AArch64.Flag> computeFlags(@SuppressWarnings("unused") HotSpotVMConfig config) {
protected EnumSet<AArch64.Flag> computeFlags(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
EnumSet<AArch64.Flag> flags = EnumSet.noneOf(AArch64.Flag.class);
return flags;
}
protected TargetDescription createTarget(HotSpotVMConfig config) {
protected TargetDescription createTarget(AArch64HotSpotVMConfig config) {
final int stackFrameAlignment = 16;
final int implicitNullCheckLimit = 4096;
final boolean inlineObjects = true;
@ -67,8 +66,8 @@ public class AArch64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFac
return new HotSpotConstantReflectionProvider(runtime);
}
protected RegisterConfig createRegisterConfig(HotSpotJVMCIRuntimeProvider runtime, TargetDescription target) {
return new AArch64HotSpotRegisterConfig(target, runtime.getConfig());
protected RegisterConfig createRegisterConfig(AArch64HotSpotVMConfig config, TargetDescription target) {
return new AArch64HotSpotRegisterConfig(target, config.useCompressedOops);
}
protected HotSpotCodeCacheProvider createCodeCache(HotSpotJVMCIRuntimeProvider runtime, TargetDescription target, RegisterConfig regConfig) {
@ -93,7 +92,8 @@ public class AArch64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFac
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, JVMCIBackend host) {
assert host == null;
TargetDescription target = createTarget(runtime.getConfig());
AArch64HotSpotVMConfig config = new AArch64HotSpotVMConfig(runtime.getConfigStore());
TargetDescription target = createTarget(config);
RegisterConfig regConfig;
HotSpotCodeCacheProvider codeCache;
@ -105,7 +105,7 @@ public class AArch64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFac
metaAccess = createMetaAccess(runtime);
}
try (InitTimer rt = timer("create RegisterConfig")) {
regConfig = createRegisterConfig(runtime, target);
regConfig = createRegisterConfig(config, target);
}
try (InitTimer rt = timer("create CodeCache provider")) {
codeCache = createCodeCache(runtime, target, regConfig);

View File

@ -49,8 +49,6 @@ import static jdk.vm.ci.aarch64.AArch64.v7;
import static jdk.vm.ci.aarch64.AArch64.zr;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -60,6 +58,7 @@ import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.code.RegisterAttributes;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.StackSlot;
@ -67,7 +66,6 @@ import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.code.ValueKindFactory;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotCallingConventionType;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
@ -79,30 +77,24 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
private final TargetDescription target;
private final Register[] allocatable;
private final int maxFrameSize;
private final RegisterArray allocatable;
/**
* The caller saved registers always include all parameter registers.
*/
private final Register[] callerSaved;
private final RegisterArray callerSaved;
private final boolean allAllocatableAreCallerSaved;
private final RegisterAttributes[] attributesMap;
public int getMaximumFrameSize() {
return maxFrameSize;
@Override
public RegisterArray getAllocatableRegisters() {
return allocatable;
}
@Override
public Register[] getAllocatableRegisters() {
return allocatable.clone();
}
@Override
public Register[] filterAllocatableRegisters(PlatformKind kind, Register[] registers) {
public RegisterArray filterAllocatableRegisters(PlatformKind kind, RegisterArray registers) {
ArrayList<Register> list = new ArrayList<>();
for (Register reg : registers) {
if (target.arch.canStoreValue(reg.getRegisterCategory(), kind)) {
@ -110,8 +102,7 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
}
}
Register[] ret = list.toArray(new Register[list.size()]);
return ret;
return new RegisterArray(list);
}
@Override
@ -119,9 +110,9 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
return attributesMap.clone();
}
private final Register[] javaGeneralParameterRegisters = {r1, r2, r3, r4, r5, r6, r7, r0};
private final Register[] nativeGeneralParameterRegisters = {r0, r1, r2, r3, r4, r5, r6, r7};
private final Register[] simdParameterRegisters = {v0, v1, v2, v3, v4, v5, v6, v7};
private final RegisterArray javaGeneralParameterRegisters = new RegisterArray(r1, r2, r3, r4, r5, r6, r7, r0);
private final RegisterArray nativeGeneralParameterRegisters = new RegisterArray(r0, r1, r2, r3, r4, r5, r6, r7);
private final RegisterArray simdParameterRegisters = new RegisterArray(v0, v1, v2, v3, v4, v5, v6, v7);
public static final Register inlineCacheRegister = r9;
@ -134,12 +125,12 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
public static final Register threadRegister = r28;
public static final Register fp = r29;
private static final Register[] reservedRegisters = {threadRegister, fp, lr, r31, zr, sp};
private static final RegisterArray reservedRegisters = new RegisterArray(threadRegister, fp, lr, r31, zr, sp);
private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
Register[] allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
RegisterArray allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.size() - reservedRegisters.size() - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = reservedRegisters.asList();
int idx = 0;
for (Register reg : allRegisters) {
@ -157,36 +148,35 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
}
assert idx == registers.length;
return registers;
return new RegisterArray(registers);
}
public AArch64HotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config) {
this(target, config, initAllocatable(target.arch, config.useCompressedOops));
assert callerSaved.length >= allocatable.length;
public AArch64HotSpotRegisterConfig(TargetDescription target, boolean useCompressedOops) {
this(target, initAllocatable(target.arch, useCompressedOops));
assert callerSaved.size() >= allocatable.size();
}
public AArch64HotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config, Register[] allocatable) {
public AArch64HotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable) {
this.target = target;
this.maxFrameSize = config.maxFrameSize;
this.allocatable = allocatable.clone();
this.allocatable = allocatable;
Set<Register> callerSaveSet = new HashSet<>();
Collections.addAll(callerSaveSet, allocatable);
Collections.addAll(callerSaveSet, simdParameterRegisters);
Collections.addAll(callerSaveSet, javaGeneralParameterRegisters);
Collections.addAll(callerSaveSet, nativeGeneralParameterRegisters);
callerSaved = callerSaveSet.toArray(new Register[callerSaveSet.size()]);
allocatable.addTo(callerSaveSet);
simdParameterRegisters.addTo(callerSaveSet);
javaGeneralParameterRegisters.addTo(callerSaveSet);
nativeGeneralParameterRegisters.addTo(callerSaveSet);
callerSaved = new RegisterArray(callerSaveSet);
allAllocatableAreCallerSaved = true;
attributesMap = RegisterAttributes.createMap(this, AArch64.allRegisters);
}
@Override
public Register[] getCallerSaveRegisters() {
public RegisterArray getCallerSaveRegisters() {
return callerSaved;
}
public Register[] getCalleeSaveRegisters() {
public RegisterArray getCalleeSaveRegisters() {
return null;
}
@ -207,7 +197,7 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
}
@Override
public Register[] getCallingConventionRegisters(Type type, JavaKind kind) {
public RegisterArray getCallingConventionRegisters(Type type, JavaKind kind) {
HotSpotCallingConventionType hotspotType = (HotSpotCallingConventionType) type;
switch (kind) {
case Boolean:
@ -226,7 +216,7 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
}
}
private CallingConvention callingConvention(Register[] generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
private CallingConvention callingConvention(RegisterArray generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
ValueKindFactory<?> valueKindFactory) {
AllocatableValue[] locations = new AllocatableValue[parameterTypes.length];
@ -245,15 +235,15 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.length) {
Register register = generalParameterRegisters[currentGeneral++];
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
case Double:
if (currentSIMD < simdParameterRegisters.length) {
Register register = simdParameterRegisters[currentSIMD++];
if (currentSIMD < simdParameterRegisters.size()) {
Register register = simdParameterRegisters.get(currentSIMD++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
@ -302,6 +292,6 @@ public class AArch64HotSpotRegisterConfig implements RegisterConfig {
@Override
public String toString() {
return String.format("Allocatable: " + Arrays.toString(getAllocatableRegisters()) + "%n" + "CallerSave: " + Arrays.toString(getCallerSaveRegisters()) + "%n");
return String.format("Allocatable: " + getAllocatableRegisters() + "%n" + "CallerSave: " + getCallerSaveRegisters() + "%n");
}
}

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot.aarch64;
import jdk.vm.ci.hotspot.HotSpotVMConfigAccess;
import jdk.vm.ci.hotspot.HotSpotVMConfigStore;
/**
* Used to access native configuration details.
*
* All non-static, public fields in this class are final so that they can be compiled as constants.
*/
class AArch64HotSpotVMConfig extends HotSpotVMConfigAccess {
AArch64HotSpotVMConfig(HotSpotVMConfigStore config) {
super(config);
}
final boolean linuxOs = System.getProperty("os.name", "").startsWith("Linux");
final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
}
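Taken together with the AArch64 factory changes earlier in this diff, the wiring reduces to roughly the following (simplified sketch; error handling and init timers omitted):
// Read architecture-specific flags once from the shared config store, then pass
// plain values rather than the whole HotSpotVMConfig to downstream consumers.
AArch64HotSpotVMConfig config = new AArch64HotSpotVMConfig(runtime.getConfigStore());
TargetDescription target = createTarget(config);
RegisterConfig regConfig = new AArch64HotSpotRegisterConfig(target, config.useCompressedOops);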

View File

@ -38,13 +38,12 @@ import jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotStackIntrospection;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.runtime.JVMCIBackend;
public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFactory {
protected EnumSet<AMD64.CPUFeature> computeFeatures(HotSpotVMConfig config) {
protected EnumSet<AMD64.CPUFeature> computeFeatures(AMD64HotSpotVMConfig config) {
// Configure the feature set using the HotSpot flag settings.
EnumSet<AMD64.CPUFeature> features = EnumSet.noneOf(AMD64.CPUFeature.class);
if ((config.vmVersionFeatures & config.amd643DNOWPREFETCH) != 0) {
@ -128,7 +127,7 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
return features;
}
protected EnumSet<AMD64.Flag> computeFlags(HotSpotVMConfig config) {
protected EnumSet<AMD64.Flag> computeFlags(AMD64HotSpotVMConfig config) {
EnumSet<AMD64.Flag> flags = EnumSet.noneOf(AMD64.Flag.class);
if (config.useCountLeadingZerosInstruction) {
flags.add(AMD64.Flag.UseCountLeadingZerosInstruction);
@ -139,7 +138,7 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
return flags;
}
protected TargetDescription createTarget(HotSpotVMConfig config) {
protected TargetDescription createTarget(AMD64HotSpotVMConfig config) {
final int stackFrameAlignment = 16;
final int implicitNullCheckLimit = 4096;
final boolean inlineObjects = true;
@ -151,8 +150,8 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
return new HotSpotConstantReflectionProvider(runtime);
}
protected RegisterConfig createRegisterConfig(HotSpotJVMCIRuntimeProvider runtime, TargetDescription target) {
return new AMD64HotSpotRegisterConfig(target, runtime.getConfig());
protected RegisterConfig createRegisterConfig(AMD64HotSpotVMConfig config, TargetDescription target) {
return new AMD64HotSpotRegisterConfig(target, config.useCompressedOops, config.windowsOs);
}
protected HotSpotCodeCacheProvider createCodeCache(HotSpotJVMCIRuntimeProvider runtime, TargetDescription target, RegisterConfig regConfig) {
@ -175,9 +174,9 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
@SuppressWarnings("try")
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, JVMCIBackend host) {
assert host == null;
TargetDescription target = createTarget(runtime.getConfig());
AMD64HotSpotVMConfig config = new AMD64HotSpotVMConfig(runtime.getConfigStore());
TargetDescription target = createTarget(config);
RegisterConfig regConfig;
HotSpotCodeCacheProvider codeCache;
@ -189,7 +188,7 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
metaAccess = createMetaAccess(runtime);
}
try (InitTimer rt = timer("create RegisterConfig")) {
regConfig = createRegisterConfig(runtime, target);
regConfig = createRegisterConfig(config, target);
}
try (InitTimer rt = timer("create CodeCache provider")) {
codeCache = createCodeCache(runtime, target, regConfig);

View File

@ -42,8 +42,6 @@ import static jdk.vm.ci.amd64.AMD64.xmm6;
import static jdk.vm.ci.amd64.AMD64.xmm7;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -52,6 +50,7 @@ import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.code.RegisterAttributes;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.StackSlot;
@ -59,7 +58,6 @@ import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.code.ValueKindFactory;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotCallingConventionType;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
@ -71,30 +69,24 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
private final TargetDescription target;
private final Register[] allocatable;
private final int maxFrameSize;
private final RegisterArray allocatable;
/**
* The caller saved registers always include all parameter registers.
*/
private final Register[] callerSaved;
private final RegisterArray callerSaved;
private final boolean allAllocatableAreCallerSaved;
private final RegisterAttributes[] attributesMap;
public int getMaximumFrameSize() {
return maxFrameSize;
@Override
public RegisterArray getAllocatableRegisters() {
return allocatable;
}
@Override
public Register[] getAllocatableRegisters() {
return allocatable.clone();
}
@Override
public Register[] filterAllocatableRegisters(PlatformKind kind, Register[] registers) {
public RegisterArray filterAllocatableRegisters(PlatformKind kind, RegisterArray registers) {
ArrayList<Register> list = new ArrayList<>();
for (Register reg : registers) {
if (target.arch.canStoreValue(reg.getRegisterCategory(), kind)) {
@ -102,7 +94,7 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
}
}
Register[] ret = list.toArray(new Register[list.size()]);
RegisterArray ret = new RegisterArray(list);
return ret;
}
@ -111,9 +103,9 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
return attributesMap.clone();
}
private final Register[] javaGeneralParameterRegisters;
private final Register[] nativeGeneralParameterRegisters;
private final Register[] xmmParameterRegisters = {xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7};
private final RegisterArray javaGeneralParameterRegisters;
private final RegisterArray nativeGeneralParameterRegisters;
private final RegisterArray xmmParameterRegisters = new RegisterArray(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7);
/*
* Some ABIs (e.g. Windows) require a so-called "home space", that is a save area on the stack
@ -121,12 +113,12 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
*/
private final boolean needsNativeStackHomeSpace;
private static final Register[] reservedRegisters = {rsp, r15};
private static final RegisterArray reservedRegisters = new RegisterArray(rsp, r15);
private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
Register[] allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
RegisterArray allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.size() - reservedRegisters.size() - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = reservedRegisters.asList();
int idx = 0;
for (Register reg : allRegisters) {
@ -143,47 +135,46 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
}
assert idx == registers.length;
return registers;
return new RegisterArray(registers);
}
public AMD64HotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config) {
this(target, config, initAllocatable(target.arch, config.useCompressedOops));
assert callerSaved.length >= allocatable.length;
public AMD64HotSpotRegisterConfig(TargetDescription target, boolean useCompressedOops, boolean windowsOs) {
this(target, initAllocatable(target.arch, useCompressedOops), windowsOs);
assert callerSaved.size() >= allocatable.size();
}
public AMD64HotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config, Register[] allocatable) {
public AMD64HotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable, boolean windowsOs) {
this.target = target;
this.maxFrameSize = config.maxFrameSize;
if (config.windowsOs) {
javaGeneralParameterRegisters = new Register[]{rdx, r8, r9, rdi, rsi, rcx};
nativeGeneralParameterRegisters = new Register[]{rcx, rdx, r8, r9};
if (windowsOs) {
javaGeneralParameterRegisters = new RegisterArray(rdx, r8, r9, rdi, rsi, rcx);
nativeGeneralParameterRegisters = new RegisterArray(rcx, rdx, r8, r9);
this.needsNativeStackHomeSpace = true;
} else {
javaGeneralParameterRegisters = new Register[]{rsi, rdx, rcx, r8, r9, rdi};
nativeGeneralParameterRegisters = new Register[]{rdi, rsi, rdx, rcx, r8, r9};
javaGeneralParameterRegisters = new RegisterArray(rsi, rdx, rcx, r8, r9, rdi);
nativeGeneralParameterRegisters = new RegisterArray(rdi, rsi, rdx, rcx, r8, r9);
this.needsNativeStackHomeSpace = false;
}
this.allocatable = allocatable;
Set<Register> callerSaveSet = new HashSet<>();
Collections.addAll(callerSaveSet, allocatable);
Collections.addAll(callerSaveSet, xmmParameterRegisters);
Collections.addAll(callerSaveSet, javaGeneralParameterRegisters);
Collections.addAll(callerSaveSet, nativeGeneralParameterRegisters);
callerSaved = callerSaveSet.toArray(new Register[callerSaveSet.size()]);
allocatable.addTo(callerSaveSet);
xmmParameterRegisters.addTo(callerSaveSet);
callerSaveSet.addAll(javaGeneralParameterRegisters.asList());
nativeGeneralParameterRegisters.addTo(callerSaveSet);
callerSaved = new RegisterArray(callerSaveSet);
allAllocatableAreCallerSaved = true;
attributesMap = RegisterAttributes.createMap(this, target.arch.getRegisters());
}
@Override
public Register[] getCallerSaveRegisters() {
public RegisterArray getCallerSaveRegisters() {
return callerSaved;
}
@Override
public Register[] getCalleeSaveRegisters() {
public RegisterArray getCalleeSaveRegisters() {
return null;
}
@ -204,7 +195,7 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
}
@Override
public Register[] getCallingConventionRegisters(Type type, JavaKind kind) {
public RegisterArray getCallingConventionRegisters(Type type, JavaKind kind) {
HotSpotCallingConventionType hotspotType = (HotSpotCallingConventionType) type;
switch (kind) {
case Boolean:
@ -223,13 +214,13 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
}
}
private CallingConvention callingConvention(Register[] generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
private CallingConvention callingConvention(RegisterArray generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
ValueKindFactory<?> valueKindFactory) {
AllocatableValue[] locations = new AllocatableValue[parameterTypes.length];
int currentGeneral = 0;
int currentXMM = 0;
int currentStackOffset = type == HotSpotCallingConventionType.NativeCall && needsNativeStackHomeSpace ? generalParameterRegisters.length * target.wordSize : 0;
int currentStackOffset = type == HotSpotCallingConventionType.NativeCall && needsNativeStackHomeSpace ? generalParameterRegisters.size() * target.wordSize : 0;
for (int i = 0; i < parameterTypes.length; i++) {
final JavaKind kind = parameterTypes[i].getJavaKind().getStackKind();
@ -242,15 +233,15 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.length) {
Register register = generalParameterRegisters[currentGeneral++];
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
case Double:
if (currentXMM < xmmParameterRegisters.length) {
Register register = xmmParameterRegisters[currentXMM++];
if (currentXMM < xmmParameterRegisters.size()) {
Register register = xmmParameterRegisters.get(currentXMM++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
@ -299,6 +290,6 @@ public class AMD64HotSpotRegisterConfig implements RegisterConfig {
@Override
public String toString() {
return String.format("Allocatable: " + Arrays.toString(getAllocatableRegisters()) + "%n" + "CallerSave: " + Arrays.toString(getCallerSaveRegisters()) + "%n");
return String.format("Allocatable: " + getAllocatableRegisters() + "%n" + "CallerSave: " + getCallerSaveRegisters() + "%n");
}
}

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot.amd64;
import jdk.vm.ci.hotspot.HotSpotVMConfigAccess;
import jdk.vm.ci.hotspot.HotSpotVMConfigStore;
class AMD64HotSpotVMConfig extends HotSpotVMConfigAccess {
AMD64HotSpotVMConfig(HotSpotVMConfigStore config) {
super(config);
}
final boolean windowsOs = System.getProperty("os.name", "").startsWith("Windows");
final boolean useCountLeadingZerosInstruction = getFlag("UseCountLeadingZerosInstruction", Boolean.class);
final boolean useCountTrailingZerosInstruction = getFlag("UseCountTrailingZerosInstruction", Boolean.class);
final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
// CPU capabilities
final int useSSE = getFlag("UseSSE", Integer.class);
final int useAVX = getFlag("UseAVX", Integer.class);
final long vmVersionFeatures = getFieldValue("Abstract_VM_Version::_features", Long.class, "uint64_t");
// CPU feature flags
final long amd64CX8 = getConstant("VM_Version::CPU_CX8", Long.class);
final long amd64CMOV = getConstant("VM_Version::CPU_CMOV", Long.class);
final long amd64FXSR = getConstant("VM_Version::CPU_FXSR", Long.class);
final long amd64HT = getConstant("VM_Version::CPU_HT", Long.class);
final long amd64MMX = getConstant("VM_Version::CPU_MMX", Long.class);
final long amd643DNOWPREFETCH = getConstant("VM_Version::CPU_3DNOW_PREFETCH", Long.class);
final long amd64SSE = getConstant("VM_Version::CPU_SSE", Long.class);
final long amd64SSE2 = getConstant("VM_Version::CPU_SSE2", Long.class);
final long amd64SSE3 = getConstant("VM_Version::CPU_SSE3", Long.class);
final long amd64SSSE3 = getConstant("VM_Version::CPU_SSSE3", Long.class);
final long amd64SSE4A = getConstant("VM_Version::CPU_SSE4A", Long.class);
final long amd64SSE41 = getConstant("VM_Version::CPU_SSE4_1", Long.class);
final long amd64SSE42 = getConstant("VM_Version::CPU_SSE4_2", Long.class);
final long amd64POPCNT = getConstant("VM_Version::CPU_POPCNT", Long.class);
final long amd64LZCNT = getConstant("VM_Version::CPU_LZCNT", Long.class);
final long amd64TSC = getConstant("VM_Version::CPU_TSC", Long.class);
final long amd64TSCINV = getConstant("VM_Version::CPU_TSCINV", Long.class);
final long amd64AVX = getConstant("VM_Version::CPU_AVX", Long.class);
final long amd64AVX2 = getConstant("VM_Version::CPU_AVX2", Long.class);
final long amd64AES = getConstant("VM_Version::CPU_AES", Long.class);
final long amd64ERMS = getConstant("VM_Version::CPU_ERMS", Long.class);
final long amd64CLMUL = getConstant("VM_Version::CPU_CLMUL", Long.class);
final long amd64BMI1 = getConstant("VM_Version::CPU_BMI1", Long.class);
final long amd64BMI2 = getConstant("VM_Version::CPU_BMI2", Long.class);
final long amd64RTM = getConstant("VM_Version::CPU_RTM", Long.class);
final long amd64ADX = getConstant("VM_Version::CPU_ADX", Long.class);
final long amd64AVX512F = getConstant("VM_Version::CPU_AVX512F", Long.class);
final long amd64AVX512DQ = getConstant("VM_Version::CPU_AVX512DQ", Long.class);
final long amd64AVX512PF = getConstant("VM_Version::CPU_AVX512PF", Long.class);
final long amd64AVX512ER = getConstant("VM_Version::CPU_AVX512ER", Long.class);
final long amd64AVX512CD = getConstant("VM_Version::CPU_AVX512CD", Long.class);
final long amd64AVX512BW = getConstant("VM_Version::CPU_AVX512BW", Long.class);
final long amd64AVX512VL = getConstant("VM_Version::CPU_AVX512VL", Long.class);
final long amd64SHA = getConstant("VM_Version::CPU_SHA", Long.class);
}
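The fields above are consumed as bit masks against vmVersionFeatures; a representative check, mirroring the computeFeatures pattern earlier in this diff (the AVX mapping here is illustrative), looks like:
EnumSet<AMD64.CPUFeature> features = EnumSet.noneOf(AMD64.CPUFeature.class);
if ((config.vmVersionFeatures & config.amd64AVX) != 0) {
    // each CPU capability is a single bit in Abstract_VM_Version::_features
    features.add(AMD64.CPUFeature.AVX);
}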

View File

@ -37,14 +37,13 @@ import jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider;
import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotStackIntrospection;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.runtime.JVMCIBackend;
import jdk.vm.ci.sparc.SPARC;
import jdk.vm.ci.sparc.SPARC.CPUFeature;
public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFactory {
protected TargetDescription createTarget(HotSpotVMConfig config) {
protected TargetDescription createTarget(SPARCHotSpotVMConfig config) {
final int stackFrameAlignment = 16;
final int implicitNullCheckLimit = 4096;
final boolean inlineObjects = false;
@ -56,7 +55,7 @@ public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
return new HotSpotCodeCacheProvider(runtime, runtime.getConfig(), target, regConfig);
}
protected EnumSet<CPUFeature> computeFeatures(HotSpotVMConfig config) {
protected EnumSet<CPUFeature> computeFeatures(SPARCHotSpotVMConfig config) {
EnumSet<CPUFeature> features = EnumSet.noneOf(CPUFeature.class);
if ((config.vmVersionFeatures & config.sparcVis1Instructions) != 0) {
features.add(CPUFeature.VIS1);
@ -143,10 +142,11 @@ public class SPARCHotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
@SuppressWarnings("try")
public JVMCIBackend createJVMCIBackend(HotSpotJVMCIRuntimeProvider runtime, JVMCIBackend host) {
assert host == null;
TargetDescription target = createTarget(runtime.getConfig());
SPARCHotSpotVMConfig config = new SPARCHotSpotVMConfig(runtime.getConfigStore());
TargetDescription target = createTarget(config);
HotSpotMetaAccessProvider metaAccess = new HotSpotMetaAccessProvider(runtime);
RegisterConfig regConfig = new SPARCHotSpotRegisterConfig(target, runtime.getConfig());
RegisterConfig regConfig = new SPARCHotSpotRegisterConfig(target, config.useCompressedOops);
HotSpotCodeCacheProvider codeCache = createCodeCache(runtime, target, regConfig);
HotSpotConstantReflectionProvider constantReflection = new HotSpotConstantReflectionProvider(runtime);
StackIntrospection stackIntrospection = new HotSpotStackIntrospection(runtime);

View File

@ -65,8 +65,6 @@ import static jdk.vm.ci.sparc.SPARC.o5;
import static jdk.vm.ci.sparc.SPARC.sp;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
@ -74,6 +72,7 @@ import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.CallingConvention.Type;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.code.RegisterAttributes;
import jdk.vm.ci.code.RegisterConfig;
import jdk.vm.ci.code.StackSlot;
@ -81,7 +80,6 @@ import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.code.ValueKindFactory;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotCallingConventionType;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
@ -93,7 +91,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final TargetDescription target;
private final Register[] allocatable;
private final RegisterArray allocatable;
private final RegisterAttributes[] attributesMap;
@ -103,20 +101,19 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final boolean addNativeRegisterArgumentSlots;
@Override
public Register[] getAllocatableRegisters() {
return allocatable.clone();
public RegisterArray getAllocatableRegisters() {
return allocatable;
}
@Override
public Register[] filterAllocatableRegisters(PlatformKind kind, Register[] registers) {
public RegisterArray filterAllocatableRegisters(PlatformKind kind, RegisterArray registers) {
ArrayList<Register> list = new ArrayList<>();
for (Register reg : registers) {
if (target.arch.canStoreValue(reg.getRegisterCategory(), kind)) {
list.add(reg);
}
}
Register[] ret = list.toArray(new Register[list.size()]);
return ret;
return new RegisterArray(list);
}
@Override
@ -124,30 +121,29 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
return attributesMap.clone();
}
private final Register[] cpuCallerParameterRegisters = {o0, o1, o2, o3, o4, o5};
private final Register[] cpuCalleeParameterRegisters = {i0, i1, i2, i3, i4, i5};
private final RegisterArray cpuCallerParameterRegisters = new RegisterArray(o0, o1, o2, o3, o4, o5);
private final RegisterArray cpuCalleeParameterRegisters = new RegisterArray(i0, i1, i2, i3, i4, i5);
private final Register[] fpuFloatParameterRegisters = {f0, f1, f2, f3, f4, f5, f6, f7};
private final Register[] fpuDoubleParameterRegisters = {d0, null, d2, null, d4, null, d6, null};
private final RegisterArray fpuFloatParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
private final RegisterArray fpuDoubleParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
// @formatter:off
private final Register[] callerSaveRegisters;
private final RegisterArray callerSaveRegisters;
/**
* Registers saved by the callee. This lists all L and I registers which are saved in the
* register window.
* This lists all L and I registers which are saved in the register window.
*/
private final Register[] calleeSaveRegisters = {
private final RegisterArray windowSaveRegisters = new RegisterArray(
l0, l1, l2, l3, l4, l5, l6, l7,
i0, i1, i2, i3, i4, i5, i6, i7};
i0, i1, i2, i3, i4, i5, i6, i7);
// @formatter:on
private static final Register[] reservedRegisters = {sp, g0, g2};
private static final RegisterArray reservedRegisters = new RegisterArray(sp, g0, g2);
private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
Register[] allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
RegisterArray allRegisters = arch.getAvailableValueRegisters();
Register[] registers = new Register[allRegisters.size() - reservedRegisters.size() - (reserveForHeapBase ? 1 : 0)];
List<Register> reservedRegistersList = reservedRegisters.asList();
int idx = 0;
for (Register reg : allRegisters) {
@ -164,33 +160,33 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
assert idx == registers.length;
return registers;
return new RegisterArray(registers);
}
public SPARCHotSpotRegisterConfig(TargetDescription target, HotSpotVMConfig config) {
this(target, initAllocatable(target.arch, config.useCompressedOops), config);
public SPARCHotSpotRegisterConfig(TargetDescription target, boolean useCompressedOops) {
this(target, initAllocatable(target.arch, useCompressedOops));
}
public SPARCHotSpotRegisterConfig(TargetDescription target, Register[] allocatable, HotSpotVMConfig config) {
public SPARCHotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable) {
this.target = target;
this.allocatable = allocatable.clone();
this.addNativeRegisterArgumentSlots = config.linuxOs;
HashSet<Register> callerSaveSet = new HashSet<>();
Collections.addAll(callerSaveSet, target.arch.getAvailableValueRegisters());
for (Register cs : calleeSaveRegisters) {
this.allocatable = allocatable;
this.addNativeRegisterArgumentSlots = false;
HashSet<Register> callerSaveSet = new HashSet<>(target.arch.getAvailableValueRegisters().asList());
for (Register cs : windowSaveRegisters) {
callerSaveSet.remove(cs);
}
this.callerSaveRegisters = callerSaveSet.toArray(new Register[callerSaveSet.size()]);
this.callerSaveRegisters = new RegisterArray(callerSaveSet);
attributesMap = RegisterAttributes.createMap(this, SPARC.allRegisters);
}
@Override
public Register[] getCallerSaveRegisters() {
public RegisterArray getCallerSaveRegisters() {
return callerSaveRegisters;
}
public Register[] getCalleeSaveRegisters() {
return calleeSaveRegisters;
@Override
public RegisterArray getCalleeSaveRegisters() {
return null;
}
@Override
@ -211,7 +207,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
@Override
public Register[] getCallingConventionRegisters(Type type, JavaKind kind) {
public RegisterArray getCallingConventionRegisters(Type type, JavaKind kind) {
HotSpotCallingConventionType hotspotType = (HotSpotCallingConventionType) type;
switch (kind) {
case Boolean:
@ -230,7 +226,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
}
}
private CallingConvention callingConvention(Register[] generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
private CallingConvention callingConvention(RegisterArray generalParameterRegisters, JavaType returnType, JavaType[] parameterTypes, HotSpotCallingConventionType type,
ValueKindFactory<?> valueKindFactory) {
AllocatableValue[] locations = new AllocatableValue[parameterTypes.length];
@ -249,25 +245,25 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.length) {
Register register = generalParameterRegisters[currentGeneral++];
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Double:
if (currentFloating < fpuFloatParameterRegisters.length) {
if (currentFloating < fpuFloatParameterRegisters.size()) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
}
Register register = fpuDoubleParameterRegisters[currentFloating];
Register register = fpuDoubleParameterRegisters.get(currentFloating);
currentFloating += 2; // Only every second is a double register
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
if (currentFloating < fpuFloatParameterRegisters.length) {
Register register = fpuFloatParameterRegisters[currentFloating++];
if (currentFloating < fpuFloatParameterRegisters.size()) {
Register register = fpuFloatParameterRegisters.get(currentFloating++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
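A quick worked trace of the even-index rounding above: if a preceding float argument left currentFloating at 1, the oddness check bumps it to 2, fpuDoubleParameterRegisters.get(2) yields d2 (the null at index 1 is only a placeholder), and the subsequent += 2 leaves index 4 so the next double lands in d4.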
@ -292,7 +288,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
int outArgSpillArea;
if (type == HotSpotCallingConventionType.NativeCall && addNativeRegisterArgumentSlots) {
// Space for native callee which may spill our outgoing arguments
outArgSpillArea = Math.min(locations.length, generalParameterRegisters.length) * target.wordSize;
outArgSpillArea = Math.min(locations.length, generalParameterRegisters.size()) * target.wordSize;
} else {
outArgSpillArea = 0;
}
@ -337,6 +333,6 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
@Override
public String toString() {
return String.format("Allocatable: " + Arrays.toString(getAllocatableRegisters()) + "%n" + "CallerSave: " + Arrays.toString(getCallerSaveRegisters()) + "%n");
return String.format("Allocatable: " + getAllocatableRegisters() + "%n" + "CallerSave: " + getCallerSaveRegisters() + "%n");
}
}

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot.sparc;
import jdk.vm.ci.hotspot.HotSpotVMConfigAccess;
import jdk.vm.ci.hotspot.HotSpotVMConfigStore;
/**
* Used to access native configuration details.
*
* All non-static, public fields in this class are final so that they can be compiled as constants.
*/
class SPARCHotSpotVMConfig extends HotSpotVMConfigAccess {
SPARCHotSpotVMConfig(HotSpotVMConfigStore config) {
super(config);
}
final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
// CPU capabilities
final long vmVersionFeatures = getFieldValue("Abstract_VM_Version::_features", Long.class, "uint64_t");
// SPARC specific values
final int sparcVis3Instructions = getConstant("VM_Version::vis3_instructions_m", Integer.class);
final int sparcVis2Instructions = getConstant("VM_Version::vis2_instructions_m", Integer.class);
final int sparcVis1Instructions = getConstant("VM_Version::vis1_instructions_m", Integer.class);
final int sparcCbcondInstructions = getConstant("VM_Version::cbcond_instructions_m", Integer.class);
final int sparcV8Instructions = getConstant("VM_Version::v8_instructions_m", Integer.class);
final int sparcHardwareMul32 = getConstant("VM_Version::hardware_mul32_m", Integer.class);
final int sparcHardwareDiv32 = getConstant("VM_Version::hardware_div32_m", Integer.class);
final int sparcHardwareFsmuld = getConstant("VM_Version::hardware_fsmuld_m", Integer.class);
final int sparcHardwarePopc = getConstant("VM_Version::hardware_popc_m", Integer.class);
final int sparcV9Instructions = getConstant("VM_Version::v9_instructions_m", Integer.class);
final int sparcSun4v = getConstant("VM_Version::sun4v_m", Integer.class);
final int sparcBlkInitInstructions = getConstant("VM_Version::blk_init_instructions_m", Integer.class);
final int sparcFmafInstructions = getConstant("VM_Version::fmaf_instructions_m", Integer.class);
final int sparcFmauInstructions = getConstant("VM_Version::fmau_instructions_m", Integer.class);
final int sparcSparc64Family = getConstant("VM_Version::sparc64_family_m", Integer.class);
final int sparcMFamily = getConstant("VM_Version::M_family_m", Integer.class);
final int sparcTFamily = getConstant("VM_Version::T_family_m", Integer.class);
final int sparcT1Model = getConstant("VM_Version::T1_model_m", Integer.class);
final int sparcSparc5Instructions = getConstant("VM_Version::sparc5_instructions_m", Integer.class);
final int sparcAesInstructions = getConstant("VM_Version::aes_instructions_m", Integer.class);
final int sparcSha1Instruction = getConstant("VM_Version::sha1_instruction_m", Integer.class);
final int sparcSha256Instruction = getConstant("VM_Version::sha256_instruction_m", Integer.class);
final int sparcSha512Instruction = getConstant("VM_Version::sha512_instruction_m", Integer.class);
final boolean useBlockZeroing = getFlag("UseBlockZeroing", Boolean.class);
final int blockZeroingLowLimit = getFlag("BlockZeroingLowLimit", Integer.class);
}

View File

@ -35,11 +35,9 @@ import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.common.InitTimer;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspotvmconfig.HotSpotVMField;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.internal.misc.Unsafe;
/**
* Calls from Java into HotSpot. The behavior of all the methods in this class that take a native
@ -267,8 +265,9 @@ final class CompilerToVM {
native HotSpotResolvedObjectTypeImpl resolveTypeInPool(HotSpotConstantPool constantPool, int cpi) throws LinkageError;
/**
* Looks up and attempts to resolve the {@code JVM_CONSTANT_Field} entry at index {@code cpi} in
* {@code constantPool}. The values returned in {@code info} are:
* Looks up and attempts to resolve the {@code JVM_CONSTANT_Field} entry at index {@code cpi} in
* {@code constantPool}. For some opcodes, checks are performed that require the {@code method}
* that contains {@code opcode} to be specified. The values returned in {@code info} are:
*
* <pre>
* [(int) flags, // only valid if field is resolved
@ -281,7 +280,7 @@ final class CompilerToVM {
* @param info an array in which the details of the field are returned
* @return the type defining the field if resolution is successful, 0 otherwise
*/
native HotSpotResolvedObjectTypeImpl resolveFieldInPool(HotSpotConstantPool constantPool, int cpi, byte opcode, long[] info);
native HotSpotResolvedObjectTypeImpl resolveFieldInPool(HotSpotConstantPool constantPool, int cpi, HotSpotResolvedJavaMethodImpl method, byte opcode, long[] info);
/**
* Converts {@code cpci} from an index into the cache for {@code constantPool} to an index
@ -338,9 +337,22 @@ final class CompilerToVM {
native void resetCompilationStatistics();
/**
* Initializes the fields of {@code config}.
* Reads the database of VM info. The return value encodes the info in a nested object array
* that is described by the pseudo Java object {@code info} below:
*
* <pre>
* info = [
* VMField[] vmFields,
* [String name, Long size, ...] vmTypeSizes,
* [String name, Long value, ...] vmConstants,
* [String name, Long value, ...] vmAddresses,
* VMFlag[] vmFlags
* ]
* </pre>
*
* @return VM info as encoded above
*/
native long initializeConfiguration(HotSpotVMConfig config);
native Object[] readConfiguration();
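A hedged sketch of unpacking that array, following only the pseudo-structure documented above (the compilerToVm variable and the pair-wise layout of vmTypeSizes are taken from that description):
Object[] info = compilerToVm.readConfiguration();
VMField[] vmFields = (VMField[]) info[0];
Object[] vmTypeSizes = (Object[]) info[1];        // flat [name, size, name, size, ...] pairs
for (int i = 0; i < vmTypeSizes.length; i += 2) {
    String name = (String) vmTypeSizes[i];
    Long size = (Long) vmTypeSizes[i + 1];
    // record the type size under `name`
}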
/**
* Resolves the implementation of {@code method} for virtual dispatches on objects of dynamic
@ -428,7 +440,6 @@ final class CompilerToVM {
* <li>{@link HotSpotVMConfig#localVariableTableElementLengthOffset}</li>
* <li>{@link HotSpotVMConfig#localVariableTableElementNameCpIndexOffset}</li>
* <li>{@link HotSpotVMConfig#localVariableTableElementDescriptorCpIndexOffset}</li>
* <li>{@link HotSpotVMConfig#localVariableTableElementSignatureCpIndexOffset}
* <li>{@link HotSpotVMConfig#localVariableTableElementSlotOffset}
* <li>{@link HotSpotVMConfig#localVariableTableElementStartBciOffset}
* </ul>

View File

@ -42,7 +42,7 @@ import jdk.vm.ci.meta.SpeculationLog;
public class HotSpotCodeCacheProvider implements CodeCacheProvider {
protected final HotSpotJVMCIRuntimeProvider runtime;
public final HotSpotVMConfig config;
protected final HotSpotVMConfig config;
protected final TargetDescription target;
protected final RegisterConfig regConfig;
@ -80,12 +80,13 @@ public class HotSpotCodeCacheProvider implements CodeCacheProvider {
for (Field f : fields) {
if (f.getName().endsWith("Stub")) {
f.setAccessible(true);
Object address;
try {
Object address = f.get(runtime.getConfig());
address = f.get(runtime.getConfig());
if (address.equals(call.target)) {
return f.getName() + ":0x" + Long.toHexString((Long) address);
}
} catch (Exception e) {
} catch (IllegalArgumentException | IllegalAccessException e) {
}
}
}

View File

@ -115,6 +115,7 @@ public class HotSpotCompiledCode implements CompiledCode {
}
}
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `sites`, `targetCode`, `comments`, `methods`, `dataSection`, `dataSectionPatches` and `assumptions`")
public HotSpotCompiledCode(String name, byte[] targetCode, int targetCodeSize, Site[] sites, Assumption[] assumptions, ResolvedJavaMethod[] methods, Comment[] comments, byte[] dataSection,
int dataSectionAlignment, DataPatch[] dataSectionPatches, boolean isImmutablePIC, int totalFrameSize, StackSlot deoptRescueSlot) {
this.name = name;

View File

@ -196,7 +196,9 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
@SuppressWarnings("unused")
private static HotSpotConstantPool fromMetaspace(long metaspaceConstantPool) {
return new HotSpotConstantPool(metaspaceConstantPool);
HotSpotConstantPool cp = new HotSpotConstantPool(metaspaceConstantPool);
runtime().metaAccessContext.add(cp);
return cp;
}
private HotSpotConstantPool(long metaspaceConstantPool) {
@ -298,7 +300,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private long getEntryAt(int index) {
assertBounds(index);
return UNSAFE.getAddress(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getAddress(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -309,7 +312,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private int getIntAt(int index) {
assertTag(index, JVM_CONSTANT.Integer);
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -320,7 +324,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private long getLongAt(int index) {
assertTag(index, JVM_CONSTANT.Long);
return UNSAFE.getLong(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getLong(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -331,7 +336,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private float getFloatAt(int index) {
assertTag(index, JVM_CONSTANT.Float);
return UNSAFE.getFloat(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getFloat(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -342,7 +348,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private double getDoubleAt(int index) {
assertTag(index, JVM_CONSTANT.Double);
return UNSAFE.getDouble(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getDouble(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -353,7 +360,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private int getNameAndTypeAt(int index) {
assertTag(index, JVM_CONSTANT.NameAndType);
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
return UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
}
/**
@ -434,7 +442,8 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
*/
private int getUncachedKlassRefIndexAt(int index) {
assertTagIsFieldOrMethod(index);
final int refIndex = UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + index * runtime().getHostJVMCIBackend().getTarget().wordSize);
int offset = index * runtime().getHostJVMCIBackend().getTarget().wordSize;
final int refIndex = UNSAFE.getInt(getMetaspaceConstantPool() + config().constantPoolSize + offset);
// klass ref index is in the low 16-bits.
return refIndex & 0xFFFF;
}
@ -587,7 +596,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
}
@Override
public JavaField lookupField(int cpi, int opcode) {
public JavaField lookupField(int cpi, ResolvedJavaMethod method, int opcode) {
final int index = rawIndexToConstantPoolIndex(cpi, opcode);
final int nameAndTypeIndex = getNameAndTypeRefIndexAt(index);
final int nameIndex = getNameRefIndexAt(nameAndTypeIndex);
@ -603,7 +612,7 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
long[] info = new long[2];
HotSpotResolvedObjectTypeImpl resolvedHolder;
try {
resolvedHolder = compilerToVM().resolveFieldInPool(this, index, (byte) opcode, info);
resolvedHolder = compilerToVM().resolveFieldInPool(this, index, (HotSpotResolvedJavaMethodImpl) method, (byte) opcode, info);
} catch (Throwable t) {
/*
* If there was an exception resolving the field we give up and return an unresolved
@ -680,15 +689,15 @@ final class HotSpotConstantPool implements ConstantPool, MetaspaceWrapperObject
if (!klass.isPrimitive() && !klass.isArray()) {
UNSAFE.ensureClassInitialized(klass);
}
switch (tag) {
case MethodRef:
if (Bytecodes.isInvokeHandleAlias(opcode)) {
final int methodRefCacheIndex = rawIndexToConstantPoolIndex(cpi, opcode);
if (isInvokeHandle(methodRefCacheIndex, type)) {
compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
}
if (tag == JVM_CONSTANT.MethodRef) {
if (Bytecodes.isInvokeHandleAlias(opcode)) {
final int methodRefCacheIndex = rawIndexToConstantPoolIndex(cpi, opcode);
if (isInvokeHandle(methodRefCacheIndex, type)) {
compilerToVM().resolveInvokeHandleInPool(this, methodRefCacheIndex);
}
}
}
break;
case InvokeDynamic:
if (isInvokedynamicIndex(cpi)) {

View File

@ -25,7 +25,6 @@ package jdk.vm.ci.hotspot;
import java.lang.reflect.Array;
import java.util.Objects;
import jdk.internal.vm.annotation.Stable;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.ConstantReflectionProvider;

View File

@ -29,6 +29,7 @@ public class HotSpotForeignCallTarget implements InvokeTarget {
/**
* The entry point address of this call's target.
*/
@SuppressFBWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", justification = "accessed by subclasses")//
protected long address;
public HotSpotForeignCallTarget(long address) {

View File

@ -22,8 +22,6 @@
*/
package jdk.vm.ci.hotspot;
import java.lang.reflect.Module;
import jdk.vm.ci.code.CompilationRequest;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
@ -68,7 +66,6 @@ final class HotSpotJVMCICompilerConfig {
if (compilerName != null) {
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (f.getCompilerName().equals(compilerName)) {
Module jvmciModule = JVMCICompilerFactory.class.getModule();
Services.exportJVMCITo(f.getClass());
f.onSelection();
factory = f;

View File

@ -135,6 +135,7 @@ public class HotSpotJVMCIMetaAccessContext {
*/
metadataRoots = list.getHead();
}
assert isRegistered(metaspaceObject);
}
protected ResolvedJavaType createClass(Class<?> javaClass) {
@ -208,7 +209,7 @@ public class HotSpotJVMCIMetaAccessContext {
ChunkIterator() {
currentChunk = head;
currentIndex = -1;
findNext();
next = findNext();
}
Object[] currentChunk;
@ -245,4 +246,13 @@ public class HotSpotJVMCIMetaAccessContext {
}
}
synchronized boolean isRegistered(MetaspaceWrapperObject wrapper) {
for (WeakReference<MetaspaceWrapperObject> m : list) {
if (m != null && m.get() == wrapper) {
return true;
}
}
return false;
}
}
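A small standalone sketch of the registration check added above, assuming nothing beyond java.lang.ref and java.util; the class and method names are illustrative, not the real HotSpotJVMCIMetaAccessContext. It only demonstrates the pattern visible in this hunk: wrappers are held via weak references and an isRegistered-style query is answered by a linear scan.

import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.List;

public class WeakRegistrySketch {
    private final List<WeakReference<Object>> list = new ArrayList<>();

    synchronized void add(Object wrapper) {
        list.add(new WeakReference<>(wrapper));
    }

    // Linear scan over the weak references, mirroring the assertion helper above.
    synchronized boolean isRegistered(Object wrapper) {
        for (WeakReference<Object> m : list) {
            if (m != null && m.get() == wrapper) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        WeakRegistrySketch registry = new WeakRegistrySketch();
        Object wrapper = new Object();
        registry.add(wrapper);
        System.out.println(registry.isRegistered(wrapper));      // true
        System.out.println(registry.isRegistered(new Object())); // false
    }
}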

View File

@ -27,10 +27,6 @@ import static jdk.vm.ci.common.InitTimer.timer;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -48,6 +44,7 @@ import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.common.InitTimer;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.services.HotSpotJVMCICompilerFactory;
import jdk.vm.ci.hotspot.services.HotSpotJVMCICompilerFactory.CompilationLevel;
import jdk.vm.ci.hotspot.services.HotSpotVMEventListener;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
@ -94,9 +91,10 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
*/
public enum Option {
Compiler(String.class, null, "Selects the system compiler."),
// Note: The following one is not used (see InitTimer.ENABLED).
// Note: The following one is not used (see InitTimer.ENABLED). It is added here
// so that -Djvmci.PrintFlags=true shows the option.
InitTimer(boolean.class, false, "Specifies if initialization timing is enabled."),
PrintConfig(boolean.class, false, "Prints all HotSpotVMConfig fields."),
PrintConfig(boolean.class, false, "Prints VM configuration available via JVMCI and exits."),
PrintFlags(boolean.class, false, "Prints all JVMCI flags and exits."),
ShowFlags(boolean.class, false, "Prints all JVMCI flags and continues."),
TraceMethodDataFilter(String.class, null, "");
@ -200,6 +198,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
protected final CompilerToVM compilerToVm;
protected final HotSpotVMConfigStore configStore;
protected final HotSpotVMConfig config;
private final JVMCIBackend hostBackend;
@ -240,11 +239,13 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
@SuppressWarnings("unused") private final String[] trivialPrefixes;
@SuppressWarnings("try")
@SuppressFBWarnings(value = "DM_EXIT", justification = "PrintFlags is meant to exit the VM")
private HotSpotJVMCIRuntime() {
compilerToVm = new CompilerToVM();
try (InitTimer t = timer("HotSpotVMConfig<init>")) {
config = new HotSpotVMConfig(compilerToVm);
configStore = new HotSpotVMConfigStore(compilerToVm);
config = new HotSpotVMConfig(configStore);
}
String hostArchitecture = config.getHostArchitectureName();
@ -270,18 +271,32 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
}
if (Option.PrintConfig.getBoolean()) {
printConfig(config, compilerToVm);
printConfig(configStore, compilerToVm);
System.exit(0);
}
compilerFactory = HotSpotJVMCICompilerConfig.getCompilerFactory();
if (compilerFactory instanceof HotSpotJVMCICompilerFactory) {
hsCompilerFactory = (HotSpotJVMCICompilerFactory) compilerFactory;
trivialPrefixes = hsCompilerFactory.getTrivialPrefixes();
compilationLevelAdjustment = hsCompilerFactory.getCompilationLevelAdjustment(config);
switch (hsCompilerFactory.getCompilationLevelAdjustment()) {
case None:
compilationLevelAdjustment = config.compLevelAdjustmentNone;
break;
case ByHolder:
compilationLevelAdjustment = config.compLevelAdjustmentByHolder;
break;
case ByFullSignature:
compilationLevelAdjustment = config.compLevelAdjustmentByFullSignature;
break;
default:
compilationLevelAdjustment = config.compLevelAdjustmentNone;
break;
}
} else {
hsCompilerFactory = null;
trivialPrefixes = null;
compilationLevelAdjustment = 0;
compilationLevelAdjustment = config.compLevelAdjustmentNone;
}
}
@ -296,6 +311,10 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
return metaAccessContext.fromClass(javaClass);
}
public HotSpotVMConfigStore getConfigStore() {
return configStore;
}
public HotSpotVMConfig getConfig() {
return config;
}
@ -352,7 +371,35 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
*/
@SuppressWarnings({"unused"})
private int adjustCompilationLevel(Class<?> declaringClass, String name, String signature, boolean isOsr, int level) {
return hsCompilerFactory.adjustCompilationLevel(config, declaringClass, name, signature, isOsr, level);
CompilationLevel curLevel;
if (level == config.compilationLevelNone) {
curLevel = CompilationLevel.None;
} else if (level == config.compilationLevelSimple) {
curLevel = CompilationLevel.Simple;
} else if (level == config.compilationLevelLimitedProfile) {
curLevel = CompilationLevel.LimitedProfile;
} else if (level == config.compilationLevelFullProfile) {
curLevel = CompilationLevel.FullProfile;
} else if (level == config.compilationLevelFullOptimization) {
curLevel = CompilationLevel.FullOptimization;
} else {
throw JVMCIError.shouldNotReachHere();
}
switch (hsCompilerFactory.adjustCompilationLevel(declaringClass, name, signature, isOsr, curLevel)) {
case None:
return config.compilationLevelNone;
case Simple:
return config.compilationLevelSimple;
case LimitedProfile:
return config.compilationLevelLimitedProfile;
case FullProfile:
return config.compilationLevelFullProfile;
case FullOptimization:
return config.compilationLevelFullOptimization;
default:
return level;
}
}
/**
@ -416,71 +463,40 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
}
}
private static void printConfig(HotSpotVMConfig config, CompilerToVM vm) {
Field[] fields = config.getClass().getDeclaredFields();
Map<String, Field> sortedFields = new TreeMap<>();
for (Field f : fields) {
if (!f.isSynthetic() && !Modifier.isStatic(f.getModifiers())) {
f.setAccessible(true);
sortedFields.put(f.getName(), f);
}
}
for (Field f : sortedFields.values()) {
try {
String line = String.format("%9s %-40s = %s%n", f.getType().getSimpleName(), f.getName(), pretty(f.get(config)));
byte[] lineBytes = line.getBytes();
vm.writeDebugOutput(lineBytes, 0, lineBytes.length);
vm.flushDebugOutput();
} catch (Exception e) {
}
}
@SuppressFBWarnings(value = "DM_DEFAULT_ENCODING", justification = "no localization here please!")
private static void printConfigLine(CompilerToVM vm, String format, Object... args) {
String line = String.format(format, args);
byte[] lineBytes = line.getBytes();
vm.writeDebugOutput(lineBytes, 0, lineBytes.length);
vm.flushDebugOutput();
}
private static String pretty(Object value) {
if (value == null) {
return "null";
private static void printConfig(HotSpotVMConfigStore store, CompilerToVM vm) {
TreeMap<String, VMField> fields = new TreeMap<>(store.getFields());
for (VMField field : fields.values()) {
if (!field.isStatic()) {
printConfigLine(vm, "[vmconfig:instance field] %s %s {offset=%d[0x%x]}%n", field.type, field.name, field.offset, field.offset);
} else {
String value = field.value == null ? "null" : String.format("%d[0x%x]", field.value, field.value);
printConfigLine(vm, "[vmconfig:static field] %s %s = %s {address=0x%x}%n", field.type, field.name, value, field.address);
}
}
Class<?> klass = value.getClass();
if (value instanceof String) {
return "\"" + value + "\"";
} else if (value instanceof Method) {
return "method \"" + ((Method) value).getName() + "\"";
} else if (value instanceof Class<?>) {
return "class \"" + ((Class<?>) value).getSimpleName() + "\"";
} else if (value instanceof Integer) {
if ((Integer) value < 10) {
return value.toString();
}
return value + " (0x" + Integer.toHexString((Integer) value) + ")";
} else if (value instanceof Long) {
if ((Long) value < 10 && (Long) value > -10) {
return value + "l";
}
return value + "l (0x" + Long.toHexString((Long) value) + "l)";
} else if (klass.isArray()) {
StringBuilder str = new StringBuilder();
int dimensions = 0;
while (klass.isArray()) {
dimensions++;
klass = klass.getComponentType();
}
int length = Array.getLength(value);
str.append(klass.getSimpleName()).append('[').append(length).append(']');
for (int i = 1; i < dimensions; i++) {
str.append("[]");
}
str.append(" {");
for (int i = 0; i < length; i++) {
str.append(pretty(Array.get(value, i)));
if (i < length - 1) {
str.append(", ");
}
}
str.append('}');
return str.toString();
TreeMap<String, VMFlag> flags = new TreeMap<>(store.getFlags());
for (VMFlag flag : flags.values()) {
printConfigLine(vm, "[vmconfig:flag] %s %s = %s%n", flag.type, flag.name, flag.value);
}
TreeMap<String, Long> addresses = new TreeMap<>(store.getAddresses());
for (Map.Entry<String, Long> e : addresses.entrySet()) {
printConfigLine(vm, "[vmconfig:address] %s = %d[0x%x]%n", e.getKey(), e.getValue(), e.getValue());
}
TreeMap<String, Long> constants = new TreeMap<>(store.getConstants());
for (Map.Entry<String, Long> e : constants.entrySet()) {
printConfigLine(vm, "[vmconfig:constant] %s = %d[0x%x]%n", e.getKey(), e.getValue(), e.getValue());
}
TreeMap<String, Long> typeSizes = new TreeMap<>(store.getTypeSizes());
for (Map.Entry<String, Long> e : typeSizes.entrySet()) {
printConfigLine(vm, "[vmconfig:type size] %s = %d%n", e.getKey(), e.getValue());
}
return value.toString();
}
public OutputStream getLogStream() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,8 @@ import jdk.vm.ci.runtime.JVMCIRuntime;
*/
public interface HotSpotJVMCIRuntimeProvider extends JVMCIRuntime {
HotSpotVMConfigStore getConfigStore();
HotSpotVMConfig getConfig();
CompilerToVM getCompilerToVM();

View File

@ -22,7 +22,6 @@
*/
package jdk.vm.ci.hotspot;
import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.MemoryAccessProvider;
@ -32,11 +31,11 @@ import jdk.vm.ci.meta.MemoryAccessProvider;
*/
public interface HotSpotMemoryAccessProvider extends MemoryAccessProvider {
JavaConstant readNarrowOopConstant(Constant base, long displacement, CompressEncoding encoding);
JavaConstant readNarrowOopConstant(Constant base, long displacement);
Constant readKlassPointerConstant(Constant base, long displacement);
Constant readNarrowKlassPointerConstant(Constant base, long displacement, CompressEncoding encoding);
Constant readNarrowKlassPointerConstant(Constant base, long displacement);
Constant readMethodPointerConstant(Constant base, long displacement);
}

View File

@ -23,7 +23,7 @@
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE;
import jdk.vm.ci.hotspot.HotSpotVMConfig.CompressEncoding;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
@ -206,8 +206,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
}
@Override
public JavaConstant readNarrowOopConstant(Constant base, long displacement, CompressEncoding encoding) {
assert encoding.equals(runtime.getConfig().getOopEncoding()) : "unexpected oop encoding: " + encoding + " != " + runtime.getConfig().getOopEncoding();
public JavaConstant readNarrowOopConstant(Constant base, long displacement) {
return HotSpotObjectConstantImpl.forObject(readRawObject(base, displacement, true), true);
}
@ -227,7 +226,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
}
@Override
public Constant readNarrowKlassPointerConstant(Constant base, long displacement, CompressEncoding encoding) {
public Constant readNarrowKlassPointerConstant(Constant base, long displacement) {
HotSpotResolvedObjectTypeImpl klass = readKlass(base, displacement, true);
if (klass == null) {
return HotSpotCompressedNullConstant.COMPRESSED_NULL;

View File

@ -30,7 +30,7 @@ import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE;
import java.util.Arrays;
import jdk.vm.ci.hotspot.HotSpotMethodDataAccessor.Tag;
import jdk.internal.misc.Unsafe;
import jdk.vm.ci.meta.DeoptimizationReason;
import jdk.vm.ci.meta.JavaMethodProfile;
import jdk.vm.ci.meta.JavaMethodProfile.ProfiledMethod;
@ -39,41 +39,20 @@ import jdk.vm.ci.meta.JavaTypeProfile.ProfiledType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.TriState;
import jdk.internal.misc.Unsafe;
/**
* Access to a HotSpot MethodData structure (defined in methodData.hpp).
* Access to a HotSpot {@code MethodData} structure (defined in methodData.hpp).
*/
public final class HotSpotMethodData {
final class HotSpotMethodData {
private static final HotSpotVMConfig config = config();
private static final HotSpotMethodDataAccessor NO_DATA_NO_EXCEPTION_ACCESSOR = new NoMethodData(TriState.FALSE);
private static final HotSpotMethodDataAccessor NO_DATA_EXCEPTION_POSSIBLY_NOT_RECORDED_ACCESSOR = new NoMethodData(TriState.UNKNOWN);
// sorted by tag
// @formatter:off
private static final HotSpotMethodDataAccessor[] PROFILE_DATA_ACCESSORS = {
null,
new BitData(),
new CounterData(),
new JumpData(),
new ReceiverTypeData(),
new VirtualCallData(),
new RetData(),
new BranchData(),
new MultiBranchData(),
new ArgInfoData(),
new UnknownProfileData(Tag.CallTypeData),
new VirtualCallTypeData(),
new UnknownProfileData(Tag.ParametersTypeData),
new UnknownProfileData(Tag.SpeculativeTrapData),
};
// @formatter:on
static final HotSpotVMConfig config = config();
static final HotSpotMethodDataAccessor NO_DATA_NO_EXCEPTION_ACCESSOR = new NoMethodData(config, config.dataLayoutNoTag, TriState.FALSE);
static final HotSpotMethodDataAccessor NO_DATA_EXCEPTION_POSSIBLY_NOT_RECORDED_ACCESSOR = new NoMethodData(config, config.dataLayoutNoTag, TriState.UNKNOWN);
/**
* Reference to the C++ MethodData object.
*/
private final long metaspaceMethodData;
final long metaspaceMethodData;
@SuppressWarnings("unused") private final HotSpotResolvedJavaMethodImpl method;
public HotSpotMethodData(long metaspaceMethodData, HotSpotResolvedJavaMethodImpl method) {
@ -133,10 +112,7 @@ public final class HotSpotMethodData {
return null;
}
HotSpotMethodDataAccessor result = getData(position);
final Tag tag = AbstractMethodData.readTag(this, position);
assert result != null : "NO_DATA tag is not allowed " + tag;
return result;
return getData(position);
}
public HotSpotMethodDataAccessor getExtraData(int position) {
@ -160,18 +136,18 @@ public final class HotSpotMethodData {
private HotSpotMethodDataAccessor getData(int position) {
assert position >= 0 : "out of bounds";
final Tag tag = AbstractMethodData.readTag(this, position);
HotSpotMethodDataAccessor accessor = PROFILE_DATA_ACCESSORS[tag.getValue()];
final int tag = HotSpotMethodDataAccessor.readTag(config, this, position);
HotSpotMethodDataAccessor accessor = PROFILE_DATA_ACCESSORS[tag];
assert accessor == null || accessor.getTag() == tag : "wrong data accessor " + accessor + " for tag " + tag;
return accessor;
}
private int readUnsignedByte(int position, int offsetInBytes) {
int readUnsignedByte(int position, int offsetInBytes) {
long fullOffsetInBytes = computeFullOffset(position, offsetInBytes);
return UNSAFE.getByte(metaspaceMethodData + fullOffsetInBytes) & 0xFF;
}
private int readUnsignedShort(int position, int offsetInBytes) {
int readUnsignedShort(int position, int offsetInBytes) {
long fullOffsetInBytes = computeFullOffset(position, offsetInBytes);
return UNSAFE.getShort(metaspaceMethodData + fullOffsetInBytes) & 0xFFFF;
}
@ -269,102 +245,14 @@ public final class HotSpotMethodData {
return sb.toString();
}
private abstract static class AbstractMethodData implements HotSpotMethodDataAccessor {
static final int NO_DATA_SIZE = cellIndexToOffset(0);
/**
* Corresponds to {@code exception_seen_flag}.
*/
private static final int EXCEPTIONS_MASK = 1 << config.bitDataExceptionSeenFlag;
private final Tag tag;
protected final int staticSize;
protected AbstractMethodData(Tag tag, int staticSize) {
this.tag = tag;
this.staticSize = staticSize;
}
public Tag getTag() {
return tag;
}
public static Tag readTag(HotSpotMethodData data, int position) {
final int tag = data.readUnsignedByte(position, config.dataLayoutTagOffset);
return Tag.getEnum(tag);
}
@Override
public int getBCI(HotSpotMethodData data, int position) {
return data.readUnsignedShort(position, config.dataLayoutBCIOffset);
}
@Override
public final int getSize(HotSpotMethodData data, int position) {
int size = staticSize + getDynamicSize(data, position);
// Sanity check against VM
int vmSize = HotSpotJVMCIRuntime.runtime().compilerToVm.methodDataProfileDataSize(data.metaspaceMethodData, position);
assert size == vmSize : size + " != " + vmSize;
return size;
}
@Override
public TriState getExceptionSeen(HotSpotMethodData data, int position) {
return TriState.get((getFlags(data, position) & EXCEPTIONS_MASK) != 0);
}
@Override
public JavaTypeProfile getTypeProfile(HotSpotMethodData data, int position) {
return null;
}
@Override
public JavaMethodProfile getMethodProfile(HotSpotMethodData data, int position) {
return null;
}
@Override
public double getBranchTakenProbability(HotSpotMethodData data, int position) {
return -1;
}
@Override
public double[] getSwitchProbabilities(HotSpotMethodData data, int position) {
return null;
}
@Override
public int getExecutionCount(HotSpotMethodData data, int position) {
return -1;
}
@Override
public TriState getNullSeen(HotSpotMethodData data, int position) {
return TriState.UNKNOWN;
}
protected int getFlags(HotSpotMethodData data, int position) {
return data.readUnsignedByte(position, config.dataLayoutFlagsOffset);
}
/**
* @param data
* @param position
*/
protected int getDynamicSize(HotSpotMethodData data, int position) {
return 0;
}
public abstract StringBuilder appendTo(StringBuilder sb, HotSpotMethodData data, int pos);
}
private static class NoMethodData extends AbstractMethodData {
private static final int NO_DATA_SIZE = cellIndexToOffset(0);
static class NoMethodData extends HotSpotMethodDataAccessor {
private final TriState exceptionSeen;
protected NoMethodData(TriState exceptionSeen) {
super(Tag.No, NO_DATA_SIZE);
protected NoMethodData(HotSpotVMConfig config, int tag, TriState exceptionSeen) {
super(config, tag, NO_DATA_SIZE);
this.exceptionSeen = exceptionSeen;
}
@ -384,17 +272,17 @@ public final class HotSpotMethodData {
}
}
private static class BitData extends AbstractMethodData {
static final int BIT_DATA_SIZE = cellIndexToOffset(0);
static final int BIT_DATA_NULL_SEEN_FLAG = 1 << config.bitDataNullSeenFlag;
private static final int BIT_DATA_SIZE = cellIndexToOffset(0);
private static final int BIT_DATA_NULL_SEEN_FLAG = 1 << config.bitDataNullSeenFlag;
static class BitData extends HotSpotMethodDataAccessor {
private BitData() {
super(Tag.BitData, BIT_DATA_SIZE);
private BitData(HotSpotVMConfig config, int tag) {
super(config, tag, BIT_DATA_SIZE);
}
protected BitData(Tag tag, int staticSize) {
super(tag, staticSize);
protected BitData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -408,17 +296,17 @@ public final class HotSpotMethodData {
}
}
private static class CounterData extends BitData {
static final int COUNTER_DATA_SIZE = cellIndexToOffset(1);
static final int COUNTER_DATA_COUNT_OFFSET = cellIndexToOffset(config.methodDataCountOffset);
private static final int COUNTER_DATA_SIZE = cellIndexToOffset(1);
private static final int COUNTER_DATA_COUNT_OFFSET = cellIndexToOffset(config.methodDataCountOffset);
static class CounterData extends BitData {
CounterData() {
super(Tag.CounterData, COUNTER_DATA_SIZE);
CounterData(HotSpotVMConfig config, int tag) {
super(config, tag, COUNTER_DATA_SIZE);
}
protected CounterData(Tag tag, int staticSize) {
super(tag, staticSize);
protected CounterData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -436,18 +324,18 @@ public final class HotSpotMethodData {
}
}
private static class JumpData extends AbstractMethodData {
static final int JUMP_DATA_SIZE = cellIndexToOffset(2);
static final int TAKEN_COUNT_OFFSET = cellIndexToOffset(config.jumpDataTakenOffset);
static final int TAKEN_DISPLACEMENT_OFFSET = cellIndexToOffset(config.jumpDataDisplacementOffset);
private static final int JUMP_DATA_SIZE = cellIndexToOffset(2);
protected static final int TAKEN_COUNT_OFFSET = cellIndexToOffset(config.jumpDataTakenOffset);
protected static final int TAKEN_DISPLACEMENT_OFFSET = cellIndexToOffset(config.jumpDataDisplacementOffset);
static class JumpData extends HotSpotMethodDataAccessor {
JumpData() {
super(Tag.JumpData, JUMP_DATA_SIZE);
JumpData(HotSpotVMConfig config, int tag) {
super(config, tag, JUMP_DATA_SIZE);
}
protected JumpData(Tag tag, int staticSize) {
super(tag, staticSize);
protected JumpData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -484,16 +372,16 @@ public final class HotSpotMethodData {
}
}
private abstract static class AbstractTypeData extends CounterData {
static final int TYPE_DATA_ROW_SIZE = cellsToBytes(config.receiverTypeDataReceiverTypeRowCellCount);
protected static final int TYPE_DATA_ROW_SIZE = cellsToBytes(config.receiverTypeDataReceiverTypeRowCellCount);
static final int NONPROFILED_COUNT_OFFSET = cellIndexToOffset(config.receiverTypeDataNonprofiledCountOffset);
static final int TYPE_DATA_FIRST_TYPE_OFFSET = cellIndexToOffset(config.receiverTypeDataReceiver0Offset);
static final int TYPE_DATA_FIRST_TYPE_COUNT_OFFSET = cellIndexToOffset(config.receiverTypeDataCount0Offset);
protected static final int NONPROFILED_COUNT_OFFSET = cellIndexToOffset(config.receiverTypeDataNonprofiledCountOffset);
protected static final int TYPE_DATA_FIRST_TYPE_OFFSET = cellIndexToOffset(config.receiverTypeDataReceiver0Offset);
protected static final int TYPE_DATA_FIRST_TYPE_COUNT_OFFSET = cellIndexToOffset(config.receiverTypeDataCount0Offset);
abstract static class AbstractTypeData extends CounterData {
protected AbstractTypeData(Tag tag, int staticSize) {
super(tag, staticSize);
protected AbstractTypeData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -539,7 +427,7 @@ public final class HotSpotMethodData {
protected abstract long getTypesNotRecordedExecutionCount(HotSpotMethodData data, int position);
private static JavaTypeProfile createTypeProfile(TriState nullSeen, RawItemProfile<ResolvedJavaType> profile) {
private JavaTypeProfile createTypeProfile(TriState nullSeen, RawItemProfile<ResolvedJavaType> profile) {
if (profile.entries <= 0 || profile.totalCount <= 0) {
return null;
}
@ -583,16 +471,16 @@ public final class HotSpotMethodData {
}
}
private static class ReceiverTypeData extends AbstractTypeData {
static final int TYPE_CHECK_DATA_SIZE = cellIndexToOffset(2) + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
private static final int TYPE_CHECK_DATA_SIZE = cellIndexToOffset(2) + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
static class ReceiverTypeData extends AbstractTypeData {
ReceiverTypeData() {
super(Tag.ReceiverTypeData, TYPE_CHECK_DATA_SIZE);
ReceiverTypeData(HotSpotVMConfig config, int tag) {
super(config, tag, TYPE_CHECK_DATA_SIZE);
}
protected ReceiverTypeData(Tag tag, int staticSize) {
super(tag, staticSize);
protected ReceiverTypeData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -606,18 +494,18 @@ public final class HotSpotMethodData {
}
}
private static class VirtualCallData extends ReceiverTypeData {
static final int VIRTUAL_CALL_DATA_SIZE = cellIndexToOffset(2) + TYPE_DATA_ROW_SIZE * (config.typeProfileWidth + config.methodProfileWidth);
static final int VIRTUAL_CALL_DATA_FIRST_METHOD_OFFSET = TYPE_DATA_FIRST_TYPE_OFFSET + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
static final int VIRTUAL_CALL_DATA_FIRST_METHOD_COUNT_OFFSET = TYPE_DATA_FIRST_TYPE_COUNT_OFFSET + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
private static final int VIRTUAL_CALL_DATA_SIZE = cellIndexToOffset(2) + TYPE_DATA_ROW_SIZE * (config.typeProfileWidth + config.methodProfileWidth);
private static final int VIRTUAL_CALL_DATA_FIRST_METHOD_OFFSET = TYPE_DATA_FIRST_TYPE_OFFSET + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
private static final int VIRTUAL_CALL_DATA_FIRST_METHOD_COUNT_OFFSET = TYPE_DATA_FIRST_TYPE_COUNT_OFFSET + TYPE_DATA_ROW_SIZE * config.typeProfileWidth;
static class VirtualCallData extends ReceiverTypeData {
VirtualCallData() {
super(Tag.VirtualCallData, VIRTUAL_CALL_DATA_SIZE);
VirtualCallData(HotSpotVMConfig config, int tag) {
super(config, tag, VIRTUAL_CALL_DATA_SIZE);
}
protected VirtualCallData(Tag tag, int staticSize) {
super(tag, staticSize);
protected VirtualCallData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -647,7 +535,7 @@ public final class HotSpotMethodData {
return createMethodProfile(getRawMethodProfile(data, position));
}
private static RawItemProfile<ResolvedJavaMethod> getRawMethodProfile(HotSpotMethodData data, int position) {
private RawItemProfile<ResolvedJavaMethod> getRawMethodProfile(HotSpotMethodData data, int position) {
int profileWidth = config.methodProfileWidth;
ResolvedJavaMethod[] methods = new ResolvedJavaMethod[profileWidth];
@ -671,7 +559,7 @@ public final class HotSpotMethodData {
return new RawItemProfile<>(entries, methods, counts, totalCount);
}
private static JavaMethodProfile createMethodProfile(RawItemProfile<ResolvedJavaMethod> profile) {
private JavaMethodProfile createMethodProfile(RawItemProfile<ResolvedJavaMethod> profile) {
if (profile.entries <= 0 || profile.totalCount <= 0) {
return null;
}
@ -712,10 +600,10 @@ public final class HotSpotMethodData {
}
}
private static class VirtualCallTypeData extends VirtualCallData {
static class VirtualCallTypeData extends VirtualCallData {
VirtualCallTypeData() {
super(Tag.VirtualCallTypeData, 0);
VirtualCallTypeData(HotSpotVMConfig config, int tag) {
super(config, tag, 0);
}
@Override
@ -725,23 +613,23 @@ public final class HotSpotMethodData {
}
}
private static class RetData extends CounterData {
static final int RET_DATA_ROW_SIZE = cellsToBytes(3);
static final int RET_DATA_SIZE = cellIndexToOffset(1) + RET_DATA_ROW_SIZE * config.bciProfileWidth;
private static final int RET_DATA_ROW_SIZE = cellsToBytes(3);
private static final int RET_DATA_SIZE = cellIndexToOffset(1) + RET_DATA_ROW_SIZE * config.bciProfileWidth;
static class RetData extends CounterData {
RetData() {
super(Tag.RetData, RET_DATA_SIZE);
RetData(HotSpotVMConfig config, int tag) {
super(config, tag, RET_DATA_SIZE);
}
}
private static class BranchData extends JumpData {
static final int BRANCH_DATA_SIZE = cellIndexToOffset(3);
static final int NOT_TAKEN_COUNT_OFFSET = cellIndexToOffset(config.branchDataNotTakenOffset);
private static final int BRANCH_DATA_SIZE = cellIndexToOffset(3);
private static final int NOT_TAKEN_COUNT_OFFSET = cellIndexToOffset(config.branchDataNotTakenOffset);
static class BranchData extends JumpData {
BranchData() {
super(Tag.BranchData, BRANCH_DATA_SIZE);
BranchData(HotSpotVMConfig config, int tag) {
super(config, tag, BRANCH_DATA_SIZE);
}
@Override
@ -768,13 +656,13 @@ public final class HotSpotMethodData {
}
}
private static class ArrayData extends AbstractMethodData {
static final int ARRAY_DATA_LENGTH_OFFSET = cellIndexToOffset(config.arrayDataArrayLenOffset);
static final int ARRAY_DATA_START_OFFSET = cellIndexToOffset(config.arrayDataArrayStartOffset);
private static final int ARRAY_DATA_LENGTH_OFFSET = cellIndexToOffset(config.arrayDataArrayLenOffset);
protected static final int ARRAY_DATA_START_OFFSET = cellIndexToOffset(config.arrayDataArrayStartOffset);
static class ArrayData extends HotSpotMethodDataAccessor {
ArrayData(Tag tag, int staticSize) {
super(tag, staticSize);
ArrayData(HotSpotVMConfig config, int tag, int staticSize) {
super(config, tag, staticSize);
}
@Override
@ -792,16 +680,16 @@ public final class HotSpotMethodData {
}
}
private static class MultiBranchData extends ArrayData {
static final int MULTI_BRANCH_DATA_SIZE = cellIndexToOffset(1);
static final int MULTI_BRANCH_DATA_ROW_SIZE_IN_CELLS = config.multiBranchDataPerCaseCellCount;
static final int MULTI_BRANCH_DATA_ROW_SIZE = cellsToBytes(MULTI_BRANCH_DATA_ROW_SIZE_IN_CELLS);
static final int MULTI_BRANCH_DATA_FIRST_COUNT_OFFSET = ARRAY_DATA_START_OFFSET + cellsToBytes(0);
static final int MULTI_BRANCH_DATA_FIRST_DISPLACEMENT_OFFSET = ARRAY_DATA_START_OFFSET + cellsToBytes(1);
private static final int MULTI_BRANCH_DATA_SIZE = cellIndexToOffset(1);
private static final int MULTI_BRANCH_DATA_ROW_SIZE_IN_CELLS = config.multiBranchDataPerCaseCellCount;
private static final int MULTI_BRANCH_DATA_ROW_SIZE = cellsToBytes(MULTI_BRANCH_DATA_ROW_SIZE_IN_CELLS);
private static final int MULTI_BRANCH_DATA_FIRST_COUNT_OFFSET = ARRAY_DATA_START_OFFSET + cellsToBytes(0);
private static final int MULTI_BRANCH_DATA_FIRST_DISPLACEMENT_OFFSET = ARRAY_DATA_START_OFFSET + cellsToBytes(1);
static class MultiBranchData extends ArrayData {
MultiBranchData() {
super(Tag.MultiBranchData, MULTI_BRANCH_DATA_SIZE);
MultiBranchData(HotSpotVMConfig config, int tag) {
super(config, tag, MULTI_BRANCH_DATA_SIZE);
}
@Override
@ -878,18 +766,18 @@ public final class HotSpotMethodData {
}
}
private static class ArgInfoData extends ArrayData {
static final int ARG_INFO_DATA_SIZE = cellIndexToOffset(1);
private static final int ARG_INFO_DATA_SIZE = cellIndexToOffset(1);
static class ArgInfoData extends ArrayData {
ArgInfoData() {
super(Tag.ArgInfoData, ARG_INFO_DATA_SIZE);
ArgInfoData(HotSpotVMConfig config, int tag) {
super(config, tag, ARG_INFO_DATA_SIZE);
}
}
private static class UnknownProfileData extends AbstractMethodData {
UnknownProfileData(Tag tag) {
super(tag, 0);
static class UnknownProfileData extends HotSpotMethodDataAccessor {
UnknownProfileData(HotSpotVMConfig config, int tag) {
super(config, tag, 0);
}
@Override
@ -900,7 +788,6 @@ public final class HotSpotMethodData {
@Override
public StringBuilder appendTo(StringBuilder sb, HotSpotMethodData data, int pos) {
// TODO Auto-generated method stub
return null;
}
}
@ -912,4 +799,41 @@ public final class HotSpotMethodData {
public int getCompiledIRSize() {
return UNSAFE.getInt(metaspaceMethodData + config.methodDataIRSizeOffset);
}
// sorted by tag
// @formatter:off
static final HotSpotMethodDataAccessor[] PROFILE_DATA_ACCESSORS = {
null,
new BitData(config, config.dataLayoutBitDataTag),
new CounterData(config, config.dataLayoutCounterDataTag),
new JumpData(config, config.dataLayoutJumpDataTag),
new ReceiverTypeData(config, config.dataLayoutReceiverTypeDataTag),
new VirtualCallData(config, config.dataLayoutVirtualCallDataTag),
new RetData(config, config.dataLayoutRetDataTag),
new BranchData(config, config.dataLayoutBranchDataTag),
new MultiBranchData(config, config.dataLayoutMultiBranchDataTag),
new ArgInfoData(config, config.dataLayoutArgInfoDataTag),
new UnknownProfileData(config, config.dataLayoutCallTypeDataTag),
new VirtualCallTypeData(config, config.dataLayoutVirtualCallTypeDataTag),
new UnknownProfileData(config, config.dataLayoutParametersTypeDataTag),
new UnknownProfileData(config, config.dataLayoutSpeculativeTrapDataTag),
};
private static boolean checkAccessorTags() {
int expectedTag = 0;
for (HotSpotMethodDataAccessor accessor : PROFILE_DATA_ACCESSORS) {
if (expectedTag == 0) {
assert accessor == null;
} else {
assert accessor.tag == expectedTag : expectedTag + " != " + accessor.tag + " " + accessor;
}
expectedTag++;
}
return true;
}
static {
assert checkAccessorTags();
}
// @formatter:on
}
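A standalone sketch of the tag-indexed dispatch that PROFILE_DATA_ACCESSORS and checkAccessorTags above rely on, written with illustrative class names and only two tags; it is not the real accessor hierarchy. The point is that the array is indexed by the DataLayout tag, index 0 (the "no data" tag) is deliberately null, and a one-time check verifies each accessor sits at the index equal to its own tag.

public class TagDispatchSketch {
    abstract static class Accessor {
        final int tag;
        Accessor(int tag) { this.tag = tag; }
        abstract String describe();
    }

    static final class BitDataAccessor extends Accessor {
        BitDataAccessor(int tag) { super(tag); }
        String describe() { return "BitData"; }
    }

    static final class CounterDataAccessor extends Accessor {
        CounterDataAccessor(int tag) { super(tag); }
        String describe() { return "CounterData"; }
    }

    // Index 0 is the "no data" tag and deliberately has no accessor,
    // mirroring the null first entry of PROFILE_DATA_ACCESSORS above.
    static final Accessor[] ACCESSORS = {
        null,
        new BitDataAccessor(1),
        new CounterDataAccessor(2),
    };

    // Checks that every accessor sits at the index equal to its tag.
    static boolean checkAccessorTags() {
        for (int tag = 0; tag < ACCESSORS.length; tag++) {
            Accessor accessor = ACCESSORS[tag];
            assert (tag == 0) == (accessor == null);
            assert accessor == null || accessor.tag == tag : tag + " != " + accessor.tag;
        }
        return true;
    }

    public static void main(String[] args) {
        // Run with -ea to enable the assertions.
        assert checkAccessorTags();
        int tag = 2; // the real code reads this from the DataLayout header
        System.out.println(ACCESSORS[tag].describe()); // CounterData
    }
}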

View File

@ -1,110 +1,129 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.hotspot.HotSpotVMConfig.config;
import jdk.vm.ci.meta.JavaMethodProfile;
import jdk.vm.ci.meta.JavaTypeProfile;
import jdk.vm.ci.meta.ProfilingInfo;
import jdk.vm.ci.meta.TriState;
/**
* Interface for accessor objects that encapsulate the logic for accessing the different kinds of
* data in a HotSpot methodDataOop. This interface is similar to the interface {@link ProfilingInfo}
* , but most methods require a MethodDataObject and the exact position within the methodData.
* Base class for accessing the different kinds of data in a HotSpot {@code MethodData}. This is
* similar to {@link ProfilingInfo}, but most methods require a {@link HotSpotMethodData} and the
* exact position within the method data.
*/
public interface HotSpotMethodDataAccessor {
abstract class HotSpotMethodDataAccessor {
/**
* {@code DataLayout} tag values.
*/
enum Tag {
No(config().dataLayoutNoTag),
BitData(config().dataLayoutBitDataTag),
CounterData(config().dataLayoutCounterDataTag),
JumpData(config().dataLayoutJumpDataTag),
ReceiverTypeData(config().dataLayoutReceiverTypeDataTag),
VirtualCallData(config().dataLayoutVirtualCallDataTag),
RetData(config().dataLayoutRetDataTag),
BranchData(config().dataLayoutBranchDataTag),
MultiBranchData(config().dataLayoutMultiBranchDataTag),
ArgInfoData(config().dataLayoutArgInfoDataTag),
CallTypeData(config().dataLayoutCallTypeDataTag),
VirtualCallTypeData(config().dataLayoutVirtualCallTypeDataTag),
ParametersTypeData(config().dataLayoutParametersTypeDataTag),
SpeculativeTrapData(config().dataLayoutSpeculativeTrapDataTag);
final int tag;
final int staticSize;
final HotSpotVMConfig config;
private final int value;
Tag(int value) {
this.value = value;
}
public int getValue() {
return value;
}
public static Tag getEnum(int value) {
Tag result = values()[value];
assert value == result.value;
return result;
}
protected HotSpotMethodDataAccessor(HotSpotVMConfig config, int tag, int staticSize) {
this.config = config;
this.tag = tag;
this.staticSize = staticSize;
}
/**
* Returns the {@link Tag} stored in the LayoutData header.
* Returns the tag stored in the LayoutData header.
*
* @return tag stored in the LayoutData header
*/
Tag getTag();
int getTag() {
return tag;
}
static int readTag(HotSpotVMConfig config, HotSpotMethodData data, int position) {
final int tag = data.readUnsignedByte(position, config.dataLayoutTagOffset);
assert tag >= config.dataLayoutNoTag && tag <= config.dataLayoutSpeculativeTrapDataTag : "profile data tag out of bounds: " + tag;
return tag;
}
/**
* Returns the BCI stored in the LayoutData header.
*
* @return An integer &ge; 0 and &le; Short.MAX_VALUE, or -1 if not supported.
* @return an integer between 0 and {@link Short#MAX_VALUE} inclusive, or -1 if not supported
*/
int getBCI(HotSpotMethodData data, int position);
int getBCI(HotSpotMethodData data, int position) {
return data.readUnsignedShort(position, config.dataLayoutBCIOffset);
}
/**
* Computes the size for the specific data at the given position.
*
* @return An integer &gt; 0.
* @return a value greater than 0
*/
int getSize(HotSpotMethodData data, int position);
final int getSize(HotSpotMethodData data, int position) {
int size = staticSize + getDynamicSize(data, position);
// Sanity check against VM
int vmSize = HotSpotJVMCIRuntime.runtime().compilerToVm.methodDataProfileDataSize(data.metaspaceMethodData, position);
assert size == vmSize : size + " != " + vmSize;
return size;
}
JavaTypeProfile getTypeProfile(HotSpotMethodData data, int position);
TriState getExceptionSeen(HotSpotMethodData data, int position) {
final int EXCEPTIONS_MASK = 1 << config.bitDataExceptionSeenFlag;
return TriState.get((getFlags(data, position) & EXCEPTIONS_MASK) != 0);
}
JavaMethodProfile getMethodProfile(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
JavaTypeProfile getTypeProfile(HotSpotMethodData data, int position) {
return null;
}
double getBranchTakenProbability(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
JavaMethodProfile getMethodProfile(HotSpotMethodData data, int position) {
return null;
}
double[] getSwitchProbabilities(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
double getBranchTakenProbability(HotSpotMethodData data, int position) {
return -1;
}
TriState getExceptionSeen(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
double[] getSwitchProbabilities(HotSpotMethodData data, int position) {
return null;
}
TriState getNullSeen(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
int getExecutionCount(HotSpotMethodData data, int position) {
return -1;
}
int getExecutionCount(HotSpotMethodData data, int position);
/**
* @param data
* @param position
*/
TriState getNullSeen(HotSpotMethodData data, int position) {
return TriState.UNKNOWN;
}
StringBuilder appendTo(StringBuilder sb, HotSpotMethodData data, int pos);
}
protected int getFlags(HotSpotMethodData data, int position) {
return data.readUnsignedByte(position, config.dataLayoutFlagsOffset);
}
/**
* @param data
* @param position
*/
protected int getDynamicSize(HotSpotMethodData data, int position) {
return 0;
}
abstract StringBuilder appendTo(StringBuilder sb, HotSpotMethodData data, int pos);
}

View File

@ -27,13 +27,25 @@ import java.util.Arrays;
import jdk.vm.ci.code.Location;
import jdk.vm.ci.code.ReferenceMap;
/**
* Describes where the object references are in machine state, compliant with what HotSpot expects.
*/
public final class HotSpotReferenceMap extends ReferenceMap {
final Location[] objects;
final Location[] derivedBase;
final int[] sizeInBytes;
final int maxRegisterSize;
private final Location[] objects;
private final Location[] derivedBase;
private final int[] sizeInBytes;
private final int maxRegisterSize;
/**
*
* @param objects This array is now owned by this object and must not be mutated by the caller.
* @param derivedBase This array is now owned by this object and must not be mutated by the
* caller.
* @param sizeInBytes This array is now owned by this object and must not be mutated by the
* caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `objects`, `derivedBase` and `sizeInBytes`")
public HotSpotReferenceMap(Location[] objects, Location[] derivedBase, int[] sizeInBytes, int maxRegisterSize) {
this.objects = objects;
this.derivedBase = derivedBase;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,12 +28,8 @@ import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import jdk.internal.vm.annotation.Stable;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.MetaAccessProvider;
import jdk.vm.ci.meta.ModifiersProvider;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaType;
/**

View File

@ -132,10 +132,20 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
return UNSAFE.getInt(javaClass, config().klassOffset) & 0xFFFFFFFFL;
}
@Override
public long getMetaspacePointer() {
return getMetaspaceKlass();
}
/**
* The Klass* for this object is kept alive by the direct reference to {@link #javaClass} so no
* extra work is required.
*/
@Override
public boolean isRegistered() {
return true;
}
@Override
public int getModifiers() {
if (isArray()) {
@ -428,7 +438,13 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
}
public HotSpotConstantPool getConstantPool() {
if (constantPool == null) {
if (constantPool == null || !isArray() && UNSAFE.getAddress(getMetaspaceKlass() + config().instanceKlassConstantsOffset) != constantPool.getMetaspaceConstantPool()) {
/*
* If the pointer to the ConstantPool has changed since this was last read refresh the
* HotSpotConstantPool wrapper object. This ensures that uses of the constant pool are
* operating on the latest one and that HotSpotResolvedJavaMethodImpls will be able to
* use the shared copy instead of creating their own instance.
*/
constantPool = compilerToVM().getConstantPool(this, config().instanceKlassConstantsOffset);
}
return constantPool;
@ -575,7 +591,8 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
// Get Klass::_fields
final long metaspaceFields = UNSAFE.getAddress(getMetaspaceKlass() + config.instanceKlassFieldsOffset);
assert config.fieldInfoFieldSlots == 6 : "revisit the field parsing code";
metaspaceData = metaspaceFields + config.arrayU2DataOffset + config.fieldInfoFieldSlots * Short.BYTES * index;
int offset = config.fieldInfoFieldSlots * Short.BYTES * index;
metaspaceData = metaspaceFields + config.arrayU2DataOffset + offset;
}
private int getAccessFlags() {
@ -603,7 +620,8 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
* on top of an array of Java shorts.
*/
private int readFieldSlot(int index) {
return UNSAFE.getChar(metaspaceData + Short.BYTES * index);
int offset = Short.BYTES * index;
return UNSAFE.getChar(metaspaceData + offset);
}
/**
@ -612,7 +630,7 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
*/
public String getName() {
final int nameIndex = getNameIndex();
return isInternal() ? HotSpotVmSymbols.symbolAt(nameIndex) : getConstantPool().lookupUtf8(nameIndex);
return isInternal() ? config().symbolAt(nameIndex) : getConstantPool().lookupUtf8(nameIndex);
}
/**
@ -621,7 +639,7 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
*/
public String getSignature() {
final int signatureIndex = getSignatureIndex();
return isInternal() ? HotSpotVmSymbols.symbolAt(signatureIndex) : getConstantPool().lookupUtf8(signatureIndex);
return isInternal() ? config().symbolAt(signatureIndex) : getConstantPool().lookupUtf8(signatureIndex);
}
public JavaType getType() {
@ -642,6 +660,7 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
}
}
@SuppressFBWarnings(value = "SE_COMPARATOR_SHOULD_BE_SERIALIZABLE", justification = "comparator is only used transiently")
private static class OffsetComparator implements java.util.Comparator<HotSpotResolvedJavaField> {
@Override
public int compare(HotSpotResolvedJavaField o1, HotSpotResolvedJavaField o2) {

View File

@ -35,7 +35,7 @@ public class HotSpotSpeculationLog implements SpeculationLog {
/** Written by the C++ code that performs deoptimization. */
private volatile Object lastFailed;
/** All speculations that have been a deoptimization reason. */
/** All speculations that have caused a deoptimization. */
private Set<SpeculationReason> failedSpeculations;
/** Strong references to all reasons embedded in the current nmethod. */
@ -54,7 +54,7 @@ public class HotSpotSpeculationLog implements SpeculationLog {
}
@Override
public boolean maySpeculate(SpeculationReason reason) {
public synchronized boolean maySpeculate(SpeculationReason reason) {
if (failedSpeculations != null && failedSpeculations.contains(reason)) {
return false;
}

View File

@ -0,0 +1,327 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import jdk.vm.ci.common.JVMCIError;
/**
* Access to VM configuration data.
*/
public class HotSpotVMConfigAccess {
/**
* Gets the address of a C++ symbol.
*
* @param name name of C++ symbol
* @param notPresent if non-null and the symbol is not present then this value is returned
* @return the address of the symbol
* @throws JVMCIError if the symbol is not present and {@code notPresent == null}
*/
public long getAddress(String name, Long notPresent) {
Long entry = store.vmAddresses.get(name);
if (entry == null) {
if (notPresent != null) {
return notPresent;
}
throw new JVMCIError("expected VM symbol not found: " + name);
}
return entry;
}
/**
* Gets the address of a C++ symbol.
*
* @param name name of C++ symbol
* @return the address of the symbol
* @throws JVMCIError if the symbol is not present
*/
public long getAddress(String name) {
return getAddress(name, null);
}
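A standalone sketch of the lookup-with-default convention used by getAddress and the other accessors in this class, assuming a plain Map in place of the HotSpotVMConfigStore; the symbol name and address value are hypothetical and IllegalStateException stands in for JVMCIError. A null notPresent means the entry is required and a missing key is an error, while a non-null notPresent is returned as the fallback.

import java.util.HashMap;
import java.util.Map;

public class NotPresentLookupSketch {
    private final Map<String, Long> vmAddresses = new HashMap<>();

    NotPresentLookupSketch() {
        // Hypothetical symbol and address, for illustration only.
        vmAddresses.put("Example::some_symbol", 0x1000L);
    }

    // Mirrors getAddress(String, Long): fall back to notPresent, or fail if it is null.
    long getAddress(String name, Long notPresent) {
        Long entry = vmAddresses.get(name);
        if (entry == null) {
            if (notPresent != null) {
                return notPresent;
            }
            throw new IllegalStateException("expected VM symbol not found: " + name);
        }
        return entry;
    }

    public static void main(String[] args) {
        NotPresentLookupSketch access = new NotPresentLookupSketch();
        System.out.println(access.getAddress("Example::some_symbol", null)); // 4096
        System.out.println(access.getAddress("not_a_symbol", 0L));           // falls back to 0
        // access.getAddress("not_a_symbol", null) would throw.
    }
}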
/**
* Gets the size of a C++ type.
*
* @param name name of the type
* @return the size in bytes of the requested type
* @throws JVMCIError if the type is not present
*/
public int getTypeSize(String name) {
Long entry = store.vmTypeSizes.get(name);
if (entry == null) {
throw new JVMCIError("expected VM type not found: " + name);
}
return (int) (long) entry;
}
/**
* Gets the value of a C++ constant.
*
* @param name name of the constant (e.g., {@code "frame::arg_reg_save_area_bytes"})
* @param type the boxed type to which the constant value will be converted
* @param notPresent if non-null and the constant is not present then this value is returned
* @return the constant value converted to {@code type}
* @throws JVMCIError if the constant is not present and {@code notPresent == null}
*/
public <T> T getConstant(String name, Class<T> type, T notPresent) {
Long c = store.vmConstants.get(name);
if (c == null) {
if (notPresent != null) {
return notPresent;
}
throw new JVMCIError("expected VM constant not found: " + name);
}
return type.cast(convertValue(name, type, c, null));
}
/**
* Gets the value of a C++ constant.
*
* @param name name of the constant (e.g., {@code "frame::arg_reg_save_area_bytes"})
* @param type the boxed type to which the constant value will be converted
* @return the constant value converted to {@code type}
* @throws JVMCIError if the constant is not present
*/
public <T> T getConstant(String name, Class<T> type) {
return getConstant(name, type, null);
}
/**
* Gets the offset of a non-static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the offset value will be converted (must be
* {@link Integer} or {@link Long})
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @param notPresent if non-null and the field is not present then this value is returned
* @return the offset in bytes of the requested field
* @throws JVMCIError if the field is static or not present and {@code notPresent} is null
*/
public <T> T getFieldOffset(String name, Class<T> type, String cppType, T notPresent) {
assert type == Integer.class || type == Long.class;
VMField entry = getField(name, cppType, notPresent == null);
if (entry == null) {
return notPresent;
}
if (entry.address != 0) {
throw new JVMCIError("cannot get offset of static field " + name);
}
return type.cast(convertValue(name, type, entry.offset, cppType));
}
/**
* Gets the offset of a non-static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the offset value will be converted (must be
* {@link Integer} or {@link Long})
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @return the offset in bytes of the requested field
* @throws JVMCIError if the field is static or not present
*/
public <T> T getFieldOffset(String name, Class<T> type, String cppType) {
return getFieldOffset(name, type, cppType, null);
}
/**
* Gets the offset of a non-static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the offset value will be converted (must be
* {@link Integer} or {@link Long})
* @return the offset in bytes of the requested field
* @throws JVMCIError if the field is static or not present
*/
public <T> T getFieldOffset(String name, Class<T> type) {
return getFieldOffset(name, type, null, null);
}
/**
* Gets the address of a static C++ field.
*
* @param name fully qualified name of the field
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @param notPresent if non-null and the field is not present then this value is returned
* @return the address of the requested field
* @throws JVMCIError if the field is not static or not present and {@code notPresent} is null
*/
public long getFieldAddress(String name, String cppType, Long notPresent) {
VMField entry = getField(name, cppType, notPresent == null);
if (entry == null) {
return notPresent;
}
if (entry.address == 0) {
throw new JVMCIError(name + " is not a static field");
}
return entry.address;
}
/**
* Gets the address of a static C++ field.
*
* @param name fully qualified name of the field
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @return the address of the requested field
* @throws JVMCIError if the field is not static or not present
*/
public long getFieldAddress(String name, String cppType) {
return getFieldAddress(name, cppType, null);
}
/**
* Gets the value of a static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the constant value will be converted
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @param notPresent if non-null and the field is not present then this value is returned
* @return the value of the requested field
* @throws JVMCIError if the field is not static or not present and {@code notPresent} is null
*/
public <T> T getFieldValue(String name, Class<T> type, String cppType, T notPresent) {
VMField entry = getField(name, cppType, notPresent == null);
if (entry == null) {
return notPresent;
}
if (entry.value == null) {
throw new JVMCIError(name + " is not a static field");
}
return type.cast(convertValue(name, type, entry.value, cppType));
}
/**
* Gets the value of a static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the constant value will be converted
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @return the value of the requested field
* @throws JVMCIError if the field is not static or not present
*/
public <T> T getFieldValue(String name, Class<T> type, String cppType) {
return getFieldValue(name, type, cppType, null);
}
/**
* Gets the value of a static C++ field.
*
* @param name fully qualified name of the field
* @param type the boxed type to which the constant value will be converted
* @return the value of the requested field
* @throws JVMCIError if the field is not static or not present
*/
public <T> T getFieldValue(String name, Class<T> type) {
return getFieldValue(name, type, null, null);
}
/**
* Gets a C++ field.
*
* @param name fully qualified name of the field
* @param cppType if non-null, the expected C++ type of the field (e.g., {@code "HeapWord*"})
* @param required specifies if the field must be present
* @return the field
* @throws JVMCIError if the field is not present and {@code required == true}
*/
private VMField getField(String name, String cppType, boolean required) {
VMField entry = store.vmFields.get(name);
if (entry == null) {
if (!required) {
return null;
}
throw new JVMCIError("expected VM field not found: " + name);
}
// Make sure the native type is still the type we expect.
if (cppType != null && !cppType.equals(entry.type)) {
throw new JVMCIError("expected type " + cppType + " but VM field " + name + " is of type " + entry.type);
}
return entry;
}
/**
* Gets a VM flag value.
*
* @param name name of the flag (e.g., {@code "CompileTheWorldStartAt"})
* @param type the boxed type to which the flag's value will be converted
     * @return the flag's value converted to {@code type}
* @throws JVMCIError if the flag is not present
*/
public <T> T getFlag(String name, Class<T> type) {
return getFlag(name, type, null);
}
/**
* Gets a VM flag value.
*
* @param name name of the flag (e.g., {@code "CompileTheWorldStartAt"})
* @param type the boxed type to which the flag's value will be converted
* @param notPresent if non-null and the flag is not present then this value is returned
* @return the flag's value converted to {@code type} or {@code notPresent} if the flag is not
* present
* @throws JVMCIError if the flag is not present and {@code notPresent == null}
*/
public <T> T getFlag(String name, Class<T> type, T notPresent) {
VMFlag entry = store.vmFlags.get(name);
if (entry == null) {
if (notPresent != null) {
return notPresent;
}
throw new JVMCIError("expected VM flag not found: " + name);
}
return type.cast(convertValue(name, type, entry.value, entry.type));
}
private static <T> Object convertValue(String name, Class<T> toType, Object value, String cppType) throws JVMCIError {
if (toType == Boolean.class) {
if (value instanceof String) {
return Boolean.valueOf((String) value);
} else if (value instanceof Boolean) {
return value;
} else if (value instanceof Long) {
return ((long) value) != 0;
}
} else if (toType == Byte.class) {
if (value instanceof Long) {
return (byte) (long) value;
}
} else if (toType == Integer.class) {
if (value instanceof Integer) {
return value;
} else if (value instanceof Long) {
return (int) (long) value;
}
} else if (toType == Long.class) {
return value;
}
throw new JVMCIError("cannot convert " + name + " of type " + value.getClass().getSimpleName() + (cppType == null ? "" : " [" + cppType + "]") + " to " + toType.getSimpleName());
}
private final HotSpotVMConfigStore store;
public HotSpotVMConfigAccess(HotSpotVMConfigStore store) {
this.store = store;
}
}
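
As a rough usage sketch (not part of this change), a configuration class can subclass HotSpotVMConfigAccess and pull offsets, constants and flags in its field initializers. The names below reuse the examples from the javadoc above plus the UseCompressedOops flag and are illustrative only.

import jdk.vm.ci.hotspot.HotSpotVMConfigAccess;
import jdk.vm.ci.hotspot.HotSpotVMConfigStore;

class ExampleVMConfig extends HotSpotVMConfigAccess {
    ExampleVMConfig(HotSpotVMConfigStore store) {
        super(store);
    }

    // Offset of a non-static C++ field, checked against its expected C++ type.
    final int klassNameOffset = getFieldOffset("Klass::_name", Integer.class, "Symbol*");

    // A C++ constant, falling back to 0 if this VM build does not export it.
    final int argRegSaveAreaBytes = getConstant("frame::arg_reg_save_area_bytes", Integer.class, 0);

    // A VM flag converted to its boxed Java type.
    final boolean useCompressedOops = getFlag("UseCompressedOops", Boolean.class);
}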

View File

@@ -0,0 +1,153 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.common.InitTimer.timer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import jdk.vm.ci.common.InitTimer;
/**
* Access to VM configuration data.
*/
public final class HotSpotVMConfigStore {
/**
* Gets the C++ symbols whose addresses are exposed by this object.
*
* @return an unmodifiable map from the symbol names to their addresses
*/
public Map<String, Long> getAddresses() {
return Collections.unmodifiableMap(vmAddresses);
}
/**
* Gets the C++ type sizes exposed by this object.
*
* @return an unmodifiable map from C++ type names to their sizes in bytes
*/
public Map<String, Long> getTypeSizes() {
return Collections.unmodifiableMap(vmTypeSizes);
}
/**
* Gets the C++ constants exposed by this object.
*
* @return an unmodifiable map from the names of C++ constants to their values
*/
public Map<String, Long> getConstants() {
return Collections.unmodifiableMap(vmConstants);
}
/**
* Gets the VM flags exposed by this object.
*
* @return an unmodifiable map from VM flag names to {@link VMFlag} objects
*/
public Map<String, VMFlag> getFlags() {
return Collections.unmodifiableMap(vmFlags);
}
/**
* Gets the C++ fields exposed by this object.
*
* @return an unmodifiable map from VM field names to {@link VMField} objects
*/
public Map<String, VMField> getFields() {
return Collections.unmodifiableMap(vmFields);
}
final HashMap<String, VMField> vmFields;
final HashMap<String, Long> vmTypeSizes;
final HashMap<String, Long> vmConstants;
final HashMap<String, Long> vmAddresses;
final HashMap<String, VMFlag> vmFlags;
/**
* Reads the database of VM info. The return value encodes the info in a nested object array
* that is described by the pseudo Java object {@code info} below:
*
* <pre>
* info = [
* VMField[] vmFields,
* [String name, Long size, ...] vmTypeSizes,
* [String name, Long value, ...] vmConstants,
* [String name, Long value, ...] vmAddresses,
* VMFlag[] vmFlags
* ]
* </pre>
*/
@SuppressWarnings("try")
HotSpotVMConfigStore(CompilerToVM compilerToVm) {
Object[] data;
try (InitTimer t = timer("CompilerToVm readConfiguration")) {
data = compilerToVm.readConfiguration();
}
assert data.length == 5 : data.length;
// @formatter:off
VMField[] vmFieldsInfo = (VMField[]) data[0];
Object[] vmTypesSizesInfo = (Object[]) data[1];
Object[] vmConstantsInfo = (Object[]) data[2];
Object[] vmAddressesInfo = (Object[]) data[3];
VMFlag[] vmFlagsInfo = (VMFlag[]) data[4];
vmFields = new HashMap<>(vmFieldsInfo.length);
vmTypeSizes = new HashMap<>(vmTypesSizesInfo.length);
vmConstants = new HashMap<>(vmConstantsInfo.length);
vmAddresses = new HashMap<>(vmAddressesInfo.length);
vmFlags = new HashMap<>(vmFlagsInfo.length);
// @formatter:on
try (InitTimer t = timer("HotSpotVMConfigStore<init> fill maps")) {
for (VMField vmField : vmFieldsInfo) {
vmFields.put(vmField.name, vmField);
}
for (int i = 0; i < vmTypesSizesInfo.length / 2; i++) {
String name = (String) vmTypesSizesInfo[i * 2];
Long size = (Long) vmTypesSizesInfo[i * 2 + 1];
vmTypeSizes.put(name, size);
}
for (int i = 0; i < vmConstantsInfo.length / 2; i++) {
String name = (String) vmConstantsInfo[i * 2];
Long value = (Long) vmConstantsInfo[i * 2 + 1];
vmConstants.put(name, value);
}
for (int i = 0; i < vmAddressesInfo.length / 2; i++) {
String name = (String) vmAddressesInfo[i * 2];
Long value = (Long) vmAddressesInfo[i * 2 + 1];
vmAddresses.put(name, value);
}
for (VMFlag vmFlag : vmFlagsInfo) {
vmFlags.put(vmFlag.name, vmFlag);
}
}
}
}
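
For illustration only, the unmodifiable views above are enough to dump what the VM exposed; the sketch below uses only accessors defined in this class and in VMField/VMFlag.

import java.util.Map;

import jdk.vm.ci.hotspot.HotSpotVMConfigStore;
import jdk.vm.ci.hotspot.VMField;
import jdk.vm.ci.hotspot.VMFlag;

class VMConfigDump {
    static void dump(HotSpotVMConfigStore store) {
        for (Map.Entry<String, Long> e : store.getTypeSizes().entrySet()) {
            System.out.printf("type   %s = %d bytes%n", e.getKey(), e.getValue());
        }
        for (VMFlag flag : store.getFlags().values()) {
            System.out.printf("flag   %s%n", flag);
        }
        for (VMField field : store.getFields().values()) {
            if (field.isStatic()) {
                System.out.printf("static field %s @ 0x%x%n", field.name, field.address);
            }
        }
    }
}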

View File

@@ -1,164 +0,0 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import static java.lang.String.format;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.Executable;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.Objects;
import jdk.vm.ci.common.JVMCIError;
import jdk.internal.org.objectweb.asm.ClassReader;
import jdk.internal.org.objectweb.asm.ClassVisitor;
import jdk.internal.org.objectweb.asm.Label;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.org.objectweb.asm.Opcodes;
import jdk.internal.org.objectweb.asm.Type;
import jdk.internal.misc.Unsafe;
/**
* A {@link ClassVisitor} that verifies {@link HotSpotVMConfig} does not access {@link Unsafe} from
* any of its non-static, non-constructor methods. This ensures that a deserialized
* {@link HotSpotVMConfig} object does not perform any unsafe reads on addresses that are only valid
* in the context in which the object was serialized. Note that this does not catch cases where a
* client uses an address stored in a {@link HotSpotVMConfig} field.
*/
final class HotSpotVMConfigVerifier extends ClassVisitor {
public static boolean check() {
Class<?> cls = HotSpotVMConfig.class;
String classFilePath = "/" + cls.getName().replace('.', '/') + ".class";
try {
InputStream classfile = cls.getResourceAsStream(classFilePath);
ClassReader cr = new ClassReader(Objects.requireNonNull(classfile, "Could not find class file for " + cls.getName()));
ClassVisitor cv = new HotSpotVMConfigVerifier();
cr.accept(cv, 0);
return true;
} catch (IOException e) {
throw new JVMCIError(e);
}
}
/**
* Source file context for error reporting.
*/
String sourceFile = null;
/**
* Line number for error reporting.
*/
int lineNo = -1;
private static Class<?> resolve(String name) {
try {
return Class.forName(name.replace('/', '.'));
} catch (ClassNotFoundException e) {
throw new JVMCIError(e);
}
}
HotSpotVMConfigVerifier() {
super(Opcodes.ASM5);
}
@Override
public void visitSource(String source, String debug) {
this.sourceFile = source;
}
void verify(boolean condition, String message) {
if (!condition) {
error(message);
}
}
void error(String message) {
String errorMessage = format("%s:%d: %s is not allowed in the context of compilation replay. The unsafe access should be moved into the %s constructor and the result cached in a field",
sourceFile, lineNo, message, HotSpotVMConfig.class.getSimpleName());
throw new JVMCIError(errorMessage);
}
@Override
public MethodVisitor visitMethod(int access, String name, String d, String signature, String[] exceptions) {
if (!Modifier.isStatic(access) && Modifier.isPublic(access) && !name.equals("<init>")) {
return new MethodVisitor(Opcodes.ASM5) {
@Override
public void visitLineNumber(int line, Label start) {
lineNo = line;
}
private Executable resolveMethod(String owner, String methodName, String methodDesc) {
Class<?> declaringClass = resolve(owner);
while (declaringClass != null) {
if (methodName.equals("<init>")) {
for (Constructor<?> c : declaringClass.getDeclaredConstructors()) {
if (methodDesc.equals(Type.getConstructorDescriptor(c))) {
return c;
}
}
} else {
Type[] argumentTypes = Type.getArgumentTypes(methodDesc);
for (Method m : declaringClass.getDeclaredMethods()) {
if (m.getName().equals(methodName)) {
if (Arrays.equals(argumentTypes, Type.getArgumentTypes(m))) {
if (Type.getReturnType(methodDesc).equals(Type.getReturnType(m))) {
return m;
}
}
}
}
}
declaringClass = declaringClass.getSuperclass();
}
throw new NoSuchMethodError(owner + "." + methodName + methodDesc);
}
/**
* Checks whether a given method is allowed to be called.
*/
private boolean checkInvokeTarget(Executable method) {
if (method.getDeclaringClass().equals(Unsafe.class)) {
return false;
}
return true;
}
@Override
public void visitMethodInsn(int opcode, String owner, String methodName, String methodDesc, boolean itf) {
Executable callee = resolveMethod(owner, methodName, methodDesc);
verify(checkInvokeTarget(callee), "invocation of " + callee);
}
};
} else {
return null;
}
}
}

View File

@@ -1,48 +0,0 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.runtime;
import static jdk.vm.ci.hotspot.UnsafeAccess.UNSAFE;
import jdk.internal.misc.Unsafe;
/**
* Class to access the C++ {@code vmSymbols} table.
*/
final class HotSpotVmSymbols {
/**
     * Returns the symbol in the {@code vmSymbols} table at position {@code index} as a
     * {@link String}.
*
* @param index position in the symbol table
     * @return the symbol at position {@code index}
*/
static String symbolAt(int index) {
HotSpotJVMCIRuntimeProvider runtime = runtime();
HotSpotVMConfig config = runtime.getConfig();
assert config.vmSymbolsFirstSID <= index && index < config.vmSymbolsSIDLimit : "index " + index + " is out of bounds";
assert config.symbolPointerSize == Unsafe.ADDRESS_SIZE : "the following address read is broken";
return runtime.getCompilerToVM().getSymbol(UNSAFE.getAddress(config.vmSymbolsSymbols + index * config.symbolPointerSize));
}
}

View File

@@ -23,7 +23,8 @@
package jdk.vm.ci.hotspot;
/**
* A tag interface indicating that this type is a wrapper around a HotSpot metaspace object.
* A tag interface indicating that this type is a wrapper around a HotSpot metaspace object that
* requires GC interaction to keep alive.
*
 * It would be preferable if this were the base class containing the pointer but that would require
* mixins since most of the wrapper types have complex supertype hierarchies.
@@ -31,4 +32,18 @@ package jdk.vm.ci.hotspot;
interface MetaspaceWrapperObject {
long getMetaspacePointer();
/**
     * Checks if this object is properly registered for metadata tracking. All classes which
     * implement this interface must be registered via
     * {@link HotSpotJVMCIMetaAccessContext#add} unless they are kept alive through other means.
     * Currently the only type which doesn't require explicit registration is
     * {@link HotSpotResolvedObjectTypeImpl} since it is kept alive by references to the
     * {@link Class}.
     *
     * @return true if this object is properly registered for metadata tracking
*/
default boolean isRegistered() {
return HotSpotJVMCIRuntime.runtime().metaAccessContext.isRegistered(this);
}
}
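
A sketch of the contract described above, assuming a wrapper declared in the same package (the interface is package-private) and assuming {@code HotSpotJVMCIMetaAccessContext.add} accepts the wrapper object directly.

final class ExampleMetaspaceWrapper implements MetaspaceWrapperObject {
    private final long metaspacePointer;

    ExampleMetaspaceWrapper(long metaspacePointer) {
        this.metaspacePointer = metaspacePointer;
        // Register for metadata tracking so the metadata stays alive
        // (the exact signature of add() is assumed here).
        HotSpotJVMCIRuntime.runtime().metaAccessContext.add(this);
    }

    @Override
    public long getMetaspacePointer() {
        assert isRegistered();
        return metaspacePointer;
    }
}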

View File

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;
/**
* Describes a C++ field exposed via {@link HotSpotVMConfigAccess}.
*/
public final class VMField {
/**
* Fully qualified name of the represented field (e.g., "Klass::_name").
*/
public final String name;
/**
* The represented field's type (e.g., "Symbol*"). This may be {@code null}.
*/
public final String type;
/**
     * If the represented field is non-static, this is its offset within the containing structure.
*/
public final long offset;
/**
     * If the represented field is static, this is its address. Otherwise, this field is 0.
*/
public final long address;
/**
* Value of the field represented as a boxed long; only valid for non-oop static fields. This
* value is only captured once, during JVMCI initialization. If {@link #type} cannot be
* meaningfully (e.g., a struct) or safely (e.g., an oop) expressed as a boxed long, this is
* {@code null}.
*/
public final Long value;
/**
* Determines if the represented field is static.
*/
public boolean isStatic() {
return address != 0;
}
/**
* Creates a description of a non-static field.
*/
public VMField(String name, String type, long offset) {
this.name = name;
this.type = type;
this.offset = offset;
this.address = 0;
this.value = null;
}
/**
* Creates a description of a static field.
*/
public VMField(String name, String type, long address, Long value) {
this.name = name;
this.type = type;
this.offset = 0;
this.address = address;
this.value = value;
}
@Override
public String toString() {
String val = value == null ? "null" : String.format("0x%x", value);
return String.format("Field[name=%s, type=%s, offset=%d, address=0x%x, value=%s]", name, type, offset, address, val);
}
}
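
To make the two constructor shapes concrete, here is an illustrative sketch; the field names, offset and address values are made up.

import jdk.vm.ci.hotspot.VMField;

class VMFieldShapes {
    static void demo() {
        VMField instanceField = new VMField("Klass::_name", "Symbol*", 23);
        VMField staticField = new VMField("Universe::_collectedHeap", "CollectedHeap*", 0x7f0000001000L, null);
        assert !instanceField.isStatic(); // described by its offset
        assert staticField.isStatic();    // described by its address; no boxed value captured
        System.out.println(instanceField);
        System.out.println(staticField);
    }
}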

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -20,41 +20,36 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspotvmconfig;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
package jdk.vm.ci.hotspot;
/**
* Refers to a C++ type in the VM.
* Describes a VM flag exposed via {@link HotSpotVMConfigAccess}.
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface HotSpotVMType {
public final class VMFlag {
/**
* Types of information this annotation can return.
* The name of the flag.
*/
enum Type {
/**
* Returns the size of the type (C++ {@code sizeof()}).
*/
SIZE;
public final String name;
/**
* The C++ type of the flag.
*/
public final String type;
/**
* The flag's value.
*/
public final Object value;
VMFlag(String name, String type, Object value) {
this.name = name;
this.type = type;
this.value = value;
}
/**
* Specifies what type of information to return.
*
* @see Type
*/
Type get();
/**
* Returns the name of the type.
*
* @return name of type
*/
String name();
@Override
public String toString() {
return String.format("Flag[type=%s, name=%s, value=%s]", type, name, value);
}
}

View File

@@ -22,7 +22,6 @@
*/
package jdk.vm.ci.hotspot.services;
import jdk.vm.ci.hotspot.HotSpotVMConfig;
import jdk.vm.ci.runtime.services.JVMCICompilerFactory;
/**
@@ -42,35 +41,54 @@ public abstract class HotSpotJVMCICompilerFactory extends JVMCICompilerFactory {
return null;
}
public enum CompilationLevelAdjustment {
/**
* No adjustment.
*/
None,
/**
* Adjust based on declaring class of method.
*/
ByHolder,
/**
* Adjust based on declaring class, name and signature of method.
*/
ByFullSignature
}
/**
* Determines if this object may want to adjust the compilation level for a method that is being
* scheduled by the VM for compilation. The legal return values and their meanings are:
* <ul>
* <li>0 - no adjustment</li>
* <li>1 - adjust based on declaring class of method</li>
* <li>2 - adjust based on declaring class, name and signature of method</li>
* </ul>
* scheduled by the VM for compilation.
*/
public int getCompilationLevelAdjustment(HotSpotVMConfig config) {
return config.compLevelAdjustmentNone;
public CompilationLevelAdjustment getCompilationLevelAdjustment() {
return CompilationLevelAdjustment.None;
}
public enum CompilationLevel {
None,
Simple,
LimitedProfile,
FullProfile,
FullOptimization
}
/**
* Potentially modifies the compilation level currently selected by the VM compilation policy
* for a method.
*
* @param config object for reading HotSpot {@code CompLevel} enum values
* @param declaringClass the class in which the method is declared
* @param name the name of the method or {@code null} depending on the value that was returned
* by {@link #getCompilationLevelAdjustment(HotSpotVMConfig)}
* by {@link #getCompilationLevelAdjustment()}
* @param signature the signature of the method or {@code null} depending on the value that was
* returned by {@link #getCompilationLevelAdjustment(HotSpotVMConfig)}
* returned by {@link #getCompilationLevelAdjustment()}
     * @param isOsr specifies if the compilation being scheduled is an OSR compilation
* @param level the compilation level currently selected by the VM compilation policy
* @return the compilation level to use for the compilation being scheduled (must be a valid
* {@code CompLevel} enum value)
*/
public int adjustCompilationLevel(HotSpotVMConfig config, Class<?> declaringClass, String name, String signature, boolean isOsr, int level) {
public CompilationLevel adjustCompilationLevel(Class<?> declaringClass, String name, String signature, boolean isOsr, CompilationLevel level) {
throw new InternalError("Should not reach here");
}
}
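
A sketch of how the new enum-based hooks might be used; the class is left abstract because the remaining JVMCICompilerFactory methods are omitted, and the package-name filter is made up.

import jdk.vm.ci.hotspot.services.HotSpotJVMCICompilerFactory;

abstract class ExampleCompilerFactory extends HotSpotJVMCICompilerFactory {
    @Override
    public CompilationLevelAdjustment getCompilationLevelAdjustment() {
        // Ask the VM to pass the declaring class of each method it schedules.
        return CompilationLevelAdjustment.ByHolder;
    }

    @Override
    public CompilationLevel adjustCompilationLevel(Class<?> declaringClass, String name,
                    String signature, boolean isOsr, CompilationLevel level) {
        // Keep methods of the (made-up) debug package at the simple tier.
        if (declaringClass.getName().startsWith("com.example.debug.")) {
            return CompilationLevel.Simple;
        }
        return level;
    }
}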

View File

@@ -1,60 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspotvmconfig;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Refers to a C++ address in the VM.
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface HotSpotVMAddress {
/**
* Returns the name of the symbol this address is referring to.
*
* @return name of symbol of this address
*/
String name();
/**
* List of architectures where this constant is required. Names are derived from
* {@link HotSpotVMConfig#getHostArchitectureName()}. An empty list implies that the constant is
* required on all architectures.
*/
@SuppressWarnings("javadoc")
String[] archs() default {};
/**
* List of operating systems where this constant is required. Names are derived from
* {@link HotSpotVMConfig#getHostOSName()}. An empty list implies that the constant is required
* on all operating systems.
*/
@SuppressWarnings("javadoc")
String[] os() default {};
}

View File

@@ -1,85 +0,0 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspotvmconfig;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Refers to a C++ field in the VM.
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface HotSpotVMField {
/**
* Types of information this annotation can return.
*/
enum Type {
/**
* Returns the offset of this field within the type. Only valid for instance fields.
*/
OFFSET,
/**
* Returns the absolute address of this field. Only valid for static fields.
*/
ADDRESS,
/**
* Returns the value of this field. Only valid for static fields.
*/
VALUE;
}
/**
* Specifies what type of information to return.
*
* @see Type
*/
Type get();
/**
* Returns the type name containing this field.
*
* @return name of containing type
*/
String type();
/**
* Returns the name of this field.
*
* @return name of field
*/
String name();
/**
* List of architectures where this constant is required. Names are derived from
* {@link HotSpotVMConfig#getHostArchitectureName()}. An empty list implies that the constant is
* required on all architectures.
*/
@SuppressWarnings("javadoc")
String[] archs() default {};
}

View File

@@ -37,6 +37,12 @@ public abstract class AbstractJavaProfile<T extends AbstractProfiledItem<U>, U>
private final double notRecordedProbability;
private final T[] pitems;
    /**
     * @param notRecordedProbability the probability of all cases not covered by {@code pitems}
     * @param pitems the profiled items; ownership of this array is transferred to this object and
     *            it must not be mutated by the caller
     */
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of the `pitems` array parameter")
public AbstractJavaProfile(double notRecordedProbability, T[] pitems) {
this.pitems = pitems;
assert !Double.isNaN(notRecordedProbability);

View File

@@ -51,14 +51,14 @@ public abstract class AbstractProfiledItem<T> implements Comparable<AbstractProf
return probability;
}
/**
* Returns -1 if the {@linkplain #getProbability() probability} of this item is greater than
     * {@code o}'s probability, 0 if they are equal, otherwise 1.
*/
@Override
public int compareTo(AbstractProfiledItem<?> o) {
if (getProbability() > o.getProbability()) {
return -1;
} else if (getProbability() < o.getProbability()) {
return 1;
}
return 0;
// Need to swap the order of operands so that higher probabilities are sorted first
return Double.compare(o.getProbability(), getProbability());
}
@Override

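The swapped operands in Double.compare above are what produce the descending order; this standalone snippet (not part of the change) shows the effect.

import java.util.Arrays;

public class DescendingCompareCheck {
    public static void main(String[] args) {
        Double[] probabilities = {0.1, 0.7, 0.2};
        // Comparing (b, a) instead of (a, b) sorts from highest to lowest probability.
        Arrays.sort(probabilities, (a, b) -> Double.compare(b, a));
        System.out.println(Arrays.toString(probabilities)); // prints [0.7, 0.2, 0.1]
    }
}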
View File

@@ -48,16 +48,18 @@ public interface ConstantPool {
/**
* Looks up a reference to a field. If {@code opcode} is non-negative, then resolution checks
* specific to the bytecode it denotes are performed if the field is already resolved. Should
* specific to the bytecode it denotes are performed if the field is already resolved. Checks
* for some bytecodes require the method that contains the bytecode to be specified. Should
* any of these checks fail, an unresolved field reference is returned.
*
* @param cpi the constant pool index
* @param opcode the opcode of the instruction for which the lookup is being performed or
* {@code -1}
* @param method the method for which the lookup is being performed
* @return a reference to the field at {@code cpi} in this pool
* @throws ClassFormatError if the entry at {@code cpi} is not a field
*/
JavaField lookupField(int cpi, int opcode);
JavaField lookupField(int cpi, ResolvedJavaMethod method, int opcode);
/**
* Looks up a reference to a method. If {@code opcode} is non-negative, then resolution checks

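A minimal sketch of the new lookupField signature in use; the caller context is assumed, and 180 is the JVMS opcode number for getfield.

import jdk.vm.ci.meta.ConstantPool;
import jdk.vm.ci.meta.JavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;

class FieldLookup {
    static JavaField resolveGetField(ConstantPool constantPool, ResolvedJavaMethod callingMethod, int cpi) {
        final int GETFIELD = 180; // JVMS opcode number for getfield
        return constantPool.lookupField(cpi, callingMethod, GETFIELD);
    }
}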
View File

@@ -22,24 +22,34 @@
*/
package jdk.vm.ci.meta;
/**
* Maps bytecode indexes to source line numbers.
*
* @see "https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.12"
*/
public class LineNumberTable {
private final int[] lineNumbers;
private final int[] bci;
/**
*
     * @param lineNumbers an array of source line numbers. This array is now owned by this object
     *            and must not be mutated by the caller.
     * @param bci an array of bytecode indexes, the same length as {@code lineNumbers}, whose
     *            entries are sorted in ascending order. This array is now owned by this object and
     *            must not be mutated by the caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `lineNumbers` and `bcis`")
public LineNumberTable(int[] lineNumbers, int[] bci) {
assert bci.length == lineNumbers.length;
this.lineNumbers = lineNumbers;
this.bci = bci;
}
public int[] getLineNumberEntries() {
return lineNumbers;
}
public int[] getBciEntries() {
return bci;
}
/**
* Gets a source line number for {@code atBci}.
*/
public int getLineNumber(int atBci) {
for (int i = 0; i < this.bci.length - 1; i++) {
if (this.bci[i] <= atBci && atBci < this.bci[i + 1]) {

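A small usage sketch of the table; the bci and line values below are illustrative.

import jdk.vm.ci.meta.LineNumberTable;

class LineNumberLookup {
    static int lineForBci(int atBci) {
        // Illustrative table: bci [0, 10) maps to line 10, bci >= 10 maps to line 12.
        LineNumberTable table = new LineNumberTable(new int[] {10, 12}, new int[] {0, 10});
        return table.getLineNumber(atBci); // lineForBci(4) == 10
    }
}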
View File

@@ -22,6 +22,9 @@
*/
package jdk.vm.ci.meta;
/**
* Describes the type and bytecode index range in which a local variable is live.
*/
public class Local {
private final String name;

View File

@@ -25,10 +25,19 @@ package jdk.vm.ci.meta;
import java.util.ArrayList;
import java.util.List;
/**
* @see "https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.13"
*/
public class LocalVariableTable {
private final Local[] locals;
/**
*
* @param locals array of objects describing local variables. This array is now owned by this
* object and must not be mutated by the caller.
*/
@SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "caller transfers ownership of `locals`")
public LocalVariableTable(Local[] locals) {
this.locals = locals;
}
@@ -47,10 +56,6 @@ public class LocalVariableTable {
return result;
}
public Local[] getLocals() {
return locals;
}
public Local[] getLocalsAt(int bci) {
List<Local> result = new ArrayList<>();
for (Local l : locals) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -20,34 +20,21 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspotvmconfig;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
package jdk.vm.ci.meta;
/**
* Refers to a C++ flag in the VM.
* Used to suppress <a href="http://findbugs.sourceforge.net">FindBugs</a> warnings.
*/
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
public @interface HotSpotVMFlag {
@interface SuppressFBWarnings {
/**
* The set of FindBugs
* <a href="http://findbugs.sourceforge.net/bugDescriptions.html">warnings</a> that are to be
     * suppressed in the annotated element. The value can be a bug category, kind or pattern.
*/
String[] value();
/**
* Returns the name of this flag.
*
* @return name of flag.
* Reason why the warning is suppressed.
*/
String name();
/**
* List of architectures where this constant is required. Names are derived from
* {@link HotSpotVMConfig#getHostArchitectureName()}. An empty list implies that the constant is
* required on all architectures.
*/
@SuppressWarnings("javadoc")
String[] archs() default {};
boolean optional() default false;
String justification();
}

View File

@@ -65,7 +65,7 @@ public abstract class Value {
* {@link #toString()} implementation of subclasses.
*/
protected final String getKindSuffix() {
return "|" + getPlatformKind().getTypeChar();
return "|" + valueKind.getKindSuffix();
}
public final ValueKind<?> getValueKind() {

View File

@@ -60,6 +60,11 @@ public abstract class ValueKind<K extends ValueKind<K>> {
public IllegalValueKind changeType(PlatformKind newPlatformKind) {
return this;
}
@Override
public String toString() {
return "ILLEGAL";
}
}
/**
@@ -82,4 +87,13 @@
* override this to preserve the additional information added by the compiler.
*/
public abstract K changeType(PlatformKind newPlatformKind);
/**
     * Returns a String representation of the kind, which will be included at the end of the
     * {@link Value#toString()} implementation. Defaults to {@link #toString()} but can be
* overridden to provide something more specific.
*/
public String getKindSuffix() {
return toString();
}
}
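
A sketch of the new extension point, assuming ValueKind's constructor takes the PlatformKind as in the surrounding JVMCI code; a compiler-specific kind can override getKindSuffix() to tag reference values.

import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.ValueKind;

final class TaggedValueKind extends ValueKind<TaggedValueKind> {
    private final boolean reference;

    TaggedValueKind(PlatformKind platformKind, boolean reference) {
        super(platformKind);
        this.reference = reference;
    }

    @Override
    public TaggedValueKind changeType(PlatformKind newPlatformKind) {
        return new TaggedValueKind(newPlatformKind, reference);
    }

    @Override
    public String getKindSuffix() {
        // Rendered by Value.getKindSuffix() as e.g. "|rq" for a reference in a quad-word register.
        return (reference ? "r" : "") + getPlatformKind().getTypeChar();
    }
}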

View File

@@ -32,6 +32,7 @@ import java.util.Set;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.code.RegisterArray;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
@@ -179,35 +180,35 @@ public class SPARC extends Architecture {
public static final Register q60 = new Register(111, getQuadncoding(60), "q60", FPUq);
// @formatter:off
public static final Register[] cpuRegisters = {
public static final RegisterArray cpuRegisters = new RegisterArray(
g0, g1, g2, g3, g4, g5, g6, g7,
o0, o1, o2, o3, o4, o5, o6, o7,
l0, l1, l2, l3, l4, l5, l6, l7,
i0, i1, i2, i3, i4, i5, i6, i7
};
);
public static final Register[] fpusRegisters = {
public static final RegisterArray fpusRegisters = new RegisterArray(
f0, f1, f2, f3, f4, f5, f6, f7,
f8, f9, f10, f11, f12, f13, f14, f15,
f16, f17, f18, f19, f20, f21, f22, f23,
f24, f25, f26, f27, f28, f29, f30, f31
};
);
public static final Register[] fpudRegisters = {
public static final RegisterArray fpudRegisters = new RegisterArray(
d0, d2, d4, d6, d8, d10, d12, d14,
d16, d18, d20, d22, d24, d26, d28, d30,
d32, d34, d36, d38, d40, d42, d44, d46,
d48, d50, d52, d54, d56, d58, d60, d62
};
);
public static final Register[] fpuqRegisters = {
public static final RegisterArray fpuqRegisters = new RegisterArray(
q0, q4, q8, q12,
q16, q20, q24, q28,
q32, q36, q40, q44,
q48, q52, q56, q60,
};
q48, q52, q56, q60
);
public static final Register[] allRegisters = {
public static final RegisterArray allRegisters = new RegisterArray(
g0, g1, g2, g3, g4, g5, g6, g7,
o0, o1, o2, o3, o4, o5, o6, o7,
l0, l1, l2, l3, l4, l5, l6, l7,
@@ -226,8 +227,8 @@ public class SPARC extends Architecture {
q0, q4, q8, q12,
q16, q20, q24, q28,
q32, q36, q40, q44,
q48, q52, q56, q60,
};
q48, q52, q56, q60
);
// @formatter:on
/**
@@ -248,7 +249,7 @@
}
@Override
public Register[] getAvailableValueRegisters() {
public RegisterArray getAvailableValueRegisters() {
return allRegisters;
}

View File

@@ -156,8 +156,6 @@ int os::Aix::_on_pase = -1;
// SS - service pack, if known, 0 otherwise
uint32_t os::Aix::_os_version = 0;
int os::Aix::_stack_page_size = -1;
// -1 = uninitialized, 0 - no, 1 - yes
int os::Aix::_xpg_sus_mode = -1;
@@ -1499,7 +1497,6 @@ void os::print_memory_info(outputStream* st) {
g_multipage_support.error);
st->cr();
st->print_cr(" os::vm_page_size: %s", describe_pagesize(os::vm_page_size()));
// not used in OpenJDK st->print_cr(" os::stack_page_size: %s", describe_pagesize(os::stack_page_size()));
// print out LDR_CNTRL because it affects the default page sizes
const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
@@ -3451,10 +3448,6 @@ void os::init(void) {
FLAG_SET_ERGO(bool, Use64KPages, true);
}
// Short-wire stack page size to base page size; if that works, we just remove
// that stack page size altogether.
Aix::_stack_page_size = Aix::_page_size;
// For now UseLargePages is just ignored.
FLAG_SET_ERGO(bool, UseLargePages, false);
_page_sizes[0] = 0;
@@ -3589,15 +3582,15 @@ jint os::init_2(void) {
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::vm_page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&

View File

@@ -49,9 +49,6 @@ class Aix {
static Mutex* _createThread_lock;
static int _page_size;
// Page size of newly created pthreads.
static int _stack_page_size;
// -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
static int _on_pase;
@@ -113,15 +110,6 @@ class Aix {
return _page_size;
}
// Accessor methods for stack page size which may be different from usual page size.
static int stack_page_size(void) {
assert(_stack_page_size != -1, "not initialized");
return _stack_page_size;
}
// This is used to scale stack space (guard pages etc.). The name is somehow misleading.
static int vm_default_page_size(void ) { return 8*K; }
static address ucontext_get_pc(const ucontext_t* uc);
static intptr_t* ucontext_get_sp(const ucontext_t* uc);
static intptr_t* ucontext_get_fp(const ucontext_t* uc);

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ inline bool os::uses_stack_guard_pages() {
// Whether or not calling code should/can commit/uncommit stack pages
// before guarding them. Answer for AIX is definitely no, because memory
// is automatically committed on touch.
inline bool os::allocate_stack_guard_pages() {
inline bool os::must_commit_stack_guard_pages() {
assert(uses_stack_guard_pages(), "sanity check");
return false;
}
@@ -65,7 +65,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
}
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::map_stack_shadow_pages() {
inline void os::map_stack_shadow_pages(address sp) {
}
inline void os::dll_unload(void *lib) {

View File

@@ -3491,13 +3491,15 @@ jint os::init_2(void) {
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
os::Bsd::min_stack_allowed = align_size_up(os::Bsd::min_stack_allowed, os::vm_page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&

View File

@@ -47,7 +47,7 @@ inline bool os::uses_stack_guard_pages() {
return true;
}
inline bool os::allocate_stack_guard_pages() {
inline bool os::must_commit_stack_guard_pages() {
assert(uses_stack_guard_pages(), "sanity check");
#if !defined(__FreeBSD__) || __FreeBSD__ < 5
// Since FreeBSD 4 uses malloc() for allocating the thread stack
@@ -68,7 +68,7 @@ inline void os::pd_split_reserved_memory(char *base, size_t size,
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::map_stack_shadow_pages() {
inline void os::map_stack_shadow_pages(address sp) {
}
inline void os::dll_unload(void *lib) {

View File

@@ -142,7 +142,6 @@ int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
Mutex* os::Linux::_createThread_lock = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_supports_fast_thread_cpu_time = false;
uint32_t os::Linux::_os_version = 0;
const char * os::Linux::_glibc_version = NULL;
@@ -4784,13 +4783,15 @@ jint os::init_2(void) {
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// size. Add two 4K pages for compiler2 recursion in main thread.
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() +
(2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
os::Linux::min_stack_allowed = align_size_up(os::Linux::min_stack_allowed, os::vm_page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&

Some files were not shown because too many files have changed in this diff.