Commit 24cdfc6d85 by J. Duke, 2017-07-05 21:54:46 +02:00
509 changed files with 14362 additions and 14670 deletions

View File

@ -367,3 +367,4 @@ cae471d3b87783e0a3deea658e1e1c84b2485b6c jdk-9+121
346be2df0f5b31d423807f53a719d1b9a67f3354 jdk-9+122
405d811c0d7b9b48ff718ae6c240b732f098c028 jdk-9+123
f80c841ae2545eaf9acd2724bccc305d98cefbe2 jdk-9+124
9aa7d40f3a453f51e47f4c1b19eff5740a74a9f8 jdk-9+125

View File

@ -359,25 +359,32 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Starting amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs_big,[$JAVA])
BOOTCYCLE_JVM_ARGS_BIG=-Xms64M
# Maximum amount of heap memory and stack size.
JVM_HEAP_LIMIT_32="1024"
# Running a 64 bit JVM allows for and requires a bigger heap
JVM_HEAP_LIMIT_64="1600"
STACK_SIZE_32=768
STACK_SIZE_64=1536
JVM_HEAP_LIMIT_GLOBAL=`expr $MEMORY_SIZE / 2`
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_32"; then
JVM_HEAP_LIMIT_32=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_64"; then
JVM_HEAP_LIMIT_64=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "512"; then
JVM_HEAP_LIMIT_32=512
JVM_HEAP_LIMIT_64=512
fi
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BOOT_JDK_BITS" = "x32"; then
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
STACK_SIZE=$STACK_SIZE_32
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_32
else
# Running a 64 bit JVM allows for and requires a bigger heap
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
STACK_SIZE=$STACK_SIZE_64
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_64
fi
ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])
@ -387,6 +394,19 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
AC_SUBST(JAVA_FLAGS_BIG)
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_32
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_32
else
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_64
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_64
fi
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -Xmx${BOOTCYCLE_MAX_HEAP}M"
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -XX:ThreadStackSize=$BOOTCYCLE_STACK_SIZE"
AC_MSG_CHECKING([flags for bootcycle boot jdk java command for big workloads])
AC_MSG_RESULT([$BOOTCYCLE_JVM_ARGS_BIG])
AC_SUBST(BOOTCYCLE_JVM_ARGS_BIG)
# By default, the main javac compilations use big
JAVA_FLAGS_JAVAC="$JAVA_FLAGS_BIG"
AC_SUBST(JAVA_FLAGS_JAVAC)
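The new logic above clamps the boot JDK -Xmx to half of physical memory, with a hard floor of 512 MB and per-bitness caps of 1024 MB (32-bit) and 1600 MB (64-bit); the chosen heap and stack values are then reused for the bootcycle JDK via BOOTCYCLE_JVM_ARGS_BIG. The following standalone Java sketch simply restates that clamping arithmetic; the class and method names are invented for illustration and are not part of the build:

```java
// Minimal sketch (hypothetical helper, not part of the build) of the clamping
// rule above: cap -Xmx at half of physical memory, but never below 512 MB.
public class BootJdkHeapLimit {
    static int heapLimitMb(int defaultLimitMb, int memorySizeMb) {
        int half = memorySizeMb / 2;                 // JVM_HEAP_LIMIT_GLOBAL
        int limit = Math.min(defaultLimitMb, half);  // shrink on small machines
        return Math.max(limit, 512);                 // but never below 512 MB
    }

    public static void main(String[] args) {
        System.out.println(heapLimitMb(1024, 8192)); // 32-bit boot JDK, 8 GB box -> 1024
        System.out.println(heapLimitMb(1600, 2048)); // 64-bit boot JDK, 2 GB box -> 1024
        System.out.println(heapLimitMb(1600, 600));  // tiny machine -> floored at 512
    }
}
```

On a 64-bit target with plenty of RAM the substituted flags therefore come out as roughly -Xms64M -Xmx1600M -XX:ThreadStackSize=1536, which is what bootcycle-spec.gmk.in and spec.gmk.in now pick up through @BOOTCYCLE_JVM_ARGS_BIG@ instead of their old hard-coded -Xmx values.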

View File

@ -64,5 +64,7 @@ SJAVAC_SERVER_JAVA_CMD:=$(JAVA_CMD)
# When building a 32bit target, make sure the sjavac server flags are compatible
# with a 32bit JVM.
ifeq ($(OPENJDK_TARGET_CPU_BITS), 32)
SJAVAC_SERVER_JAVA_FLAGS:= -Xms256M -Xmx1500M
SJAVAC_SERVER_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
endif
# The bootcycle JVM arguments may differ from the original boot jdk.
JAVA_FLAGS_BIG := @BOOTCYCLE_JVM_ARGS_BIG@

View File

@ -367,6 +367,9 @@ AC_DEFUN_ONCE([BPERF_SETUP_PRECOMPILED_HEADERS],
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
AC_MSG_RESULT([no, does not work with Solaris Studio])
USE_PRECOMPILED_HEADER=0
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
AC_MSG_RESULT([no, does not work with xlc])
USE_PRECOMPILED_HEADER=0
else
AC_MSG_RESULT([yes])
fi

View File

@ -593,9 +593,9 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
fi
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST_JVM="-O3"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_HIGHEST_JVM="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HIGHEST="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HI="-O3 -qinline -qinlglue"
C_O_FLAG_NORM="-O2"
C_O_FLAG_DEBUG="-qnoopt"
# FIXME: Value below not verified.
@ -911,8 +911,8 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
elif test "x$OPENJDK_$1_OS" = xaix; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_$1_OS" = xbsd; then
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"

View File

@ -644,6 +644,7 @@ SJAVAC_SERVER_JAVA
JAVA_TOOL_FLAGS_SMALL
JAVA_FLAGS_SMALL
JAVA_FLAGS_JAVAC
BOOTCYCLE_JVM_ARGS_BIG
JAVA_FLAGS_BIG
JAVA_FLAGS
TEST_JOBS
@ -5094,7 +5095,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1467039751
DATE_WHEN_GENERATED=1467223237
###############################################################################
#
@ -49625,9 +49626,9 @@ $as_echo "$supports" >&6; }
fi
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST_JVM="-O3"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_HIGHEST_JVM="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HIGHEST="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HI="-O3 -qinline -qinlglue"
C_O_FLAG_NORM="-O2"
C_O_FLAG_DEBUG="-qnoopt"
# FIXME: Value below not verified.
@ -50634,8 +50635,8 @@ $as_echo "$supports" >&6; }
elif test "x$OPENJDK_TARGET_OS" = xaix; then
JVM_CFLAGS="$JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
JVM_CFLAGS="$JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
JVM_CFLAGS="$JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_TARGET_OS" = xbsd; then
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
@ -51439,8 +51440,8 @@ $as_echo "$supports" >&6; }
elif test "x$OPENJDK_BUILD_OS" = xaix; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_BUILD_OS" = xbsd; then
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
@ -53468,7 +53469,7 @@ $as_echo "yes, forced" >&6; }
$as_echo "no, forced" >&6; }
BUILD_GTEST="false"
elif test "x$enable_hotspot_gtest" = "x"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue" && test "x$OPENJDK_TARGET_OS" != "xaix"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
BUILD_GTEST="true"
@ -64612,12 +64613,16 @@ fi
if test "$OPENJDK_TARGET_OS" = "solaris"; then
if test "$OPENJDK_TARGET_OS" = "solaris" && test "x$BUILD_GTEST" = "xtrue"; then
# Find the root of the Solaris Studio installation from the compiler path
SOLARIS_STUDIO_DIR="$(dirname $CC)/.."
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4$OPENJDK_TARGET_CPU_ISADIR/libstlport.so.1"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for libstlport.so.1" >&5
$as_echo_n "checking for libstlport.so.1... " >&6; }
if ! test -f "$STLPORT_LIB" && test "x$OPENJDK_TARGET_CPU_ISADIR" = "x/sparcv9"; then
# SS12u3 has libstlport under 'stlport4/v9' instead of 'stlport4/sparcv9'
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4/v9/libstlport.so.1"
fi
if test -f "$STLPORT_LIB"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, $STLPORT_LIB" >&5
$as_echo "yes, $STLPORT_LIB" >&6; }
@ -65118,25 +65123,32 @@ $as_echo_n "checking flags for boot jdk java command for big workloads... " >&6;
JVM_ARG_OK=false
fi
BOOTCYCLE_JVM_ARGS_BIG=-Xms64M
# Maximum amount of heap memory and stack size.
JVM_HEAP_LIMIT_32="1024"
# Running a 64 bit JVM allows for and requires a bigger heap
JVM_HEAP_LIMIT_64="1600"
STACK_SIZE_32=768
STACK_SIZE_64=1536
JVM_HEAP_LIMIT_GLOBAL=`expr $MEMORY_SIZE / 2`
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_32"; then
JVM_HEAP_LIMIT_32=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_64"; then
JVM_HEAP_LIMIT_64=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "512"; then
JVM_HEAP_LIMIT_32=512
JVM_HEAP_LIMIT_64=512
fi
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BOOT_JDK_BITS" = "x32"; then
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
STACK_SIZE=$STACK_SIZE_32
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_32
else
# Running a 64 bit JVM allows for and requires a bigger heap
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
STACK_SIZE=$STACK_SIZE_64
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_64
fi
$ECHO "Check if jvm arg is ok: -Xmx${JVM_MAX_HEAP}M" >&5
@ -65175,6 +65187,21 @@ $as_echo "$boot_jdk_jvmargs_big" >&6; }
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_32
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_32
else
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_64
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_64
fi
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -Xmx${BOOTCYCLE_MAX_HEAP}M"
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -XX:ThreadStackSize=$BOOTCYCLE_STACK_SIZE"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for bootcycle boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for bootcycle boot jdk java command for big workloads... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $BOOTCYCLE_JVM_ARGS_BIG" >&5
$as_echo "$BOOTCYCLE_JVM_ARGS_BIG" >&6; }
# By default, the main javac compilations use big
JAVA_FLAGS_JAVAC="$JAVA_FLAGS_BIG"
@ -66132,6 +66159,10 @@ $as_echo "no, does not work effectively with icecc" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, does not work with Solaris Studio" >&5
$as_echo "no, does not work with Solaris Studio" >&6; }
USE_PRECOMPILED_HEADER=0
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, does not work with xlc" >&5
$as_echo "no, does not work with xlc" >&6; }
USE_PRECOMPILED_HEADER=0
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }

View File

@ -333,7 +333,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_GTEST],
AC_MSG_RESULT([no, forced])
BUILD_GTEST="false"
elif test "x$enable_hotspot_gtest" = "x"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue" && test "x$OPENJDK_TARGET_OS" != "xaix"; then
AC_MSG_RESULT([yes])
BUILD_GTEST="true"
else

View File

@ -197,11 +197,15 @@ AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
################################################################################
AC_DEFUN_ONCE([LIB_SETUP_SOLARIS_STLPORT],
[
if test "$OPENJDK_TARGET_OS" = "solaris"; then
if test "$OPENJDK_TARGET_OS" = "solaris" && test "x$BUILD_GTEST" = "xtrue"; then
# Find the root of the Solaris Studio installation from the compiler path
SOLARIS_STUDIO_DIR="$(dirname $CC)/.."
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4$OPENJDK_TARGET_CPU_ISADIR/libstlport.so.1"
AC_MSG_CHECKING([for libstlport.so.1])
if ! test -f "$STLPORT_LIB" && test "x$OPENJDK_TARGET_CPU_ISADIR" = "x/sparcv9"; then
# SS12u3 has libstlport under 'stlport4/v9' instead of 'stlport4/sparcv9'
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4/v9/libstlport.so.1"
fi
if test -f "$STLPORT_LIB"; then
AC_MSG_RESULT([yes, $STLPORT_LIB])
BASIC_FIXUP_PATH([STLPORT_LIB])

View File

@ -578,7 +578,7 @@ SJAVAC_SERVER_JAVA=@FIXPATH@ @FIXPATH_DETACH_FLAG@ $(SJAVAC_SERVER_JAVA_CMD) \
JAVAC_FLAGS?=@JAVAC_FLAGS@
BUILD_JAVA_FLAGS:=-Xms64M -Xmx1100M
BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
# Use ?= as this can be overridden from bootcycle-spec.gmk

View File

@ -102,10 +102,13 @@ diff_text() {
# Ignore date strings in class files.
# Anonymous lambda classes get randomly assigned counters in their names.
if test "x$SUFFIX" = "xclass"; then
if [ "$NAME" = "module-info.class" ] || [ "$NAME" = "SystemModules.class" ]
then
# The SystemModules.class and module-info.class have several issues
# with random ordering of elements in HashSets.
if [ "$NAME" = "SystemModules.class" ]; then
# The SystemModules.class is not comparable. The way it is generated is
# too random. It can even be of different size for no apparent reason.
TMP=""
elif [ "$NAME" = "module-info.class" ]; then
# The module-info.class have several issues with random ordering of
# elements in HashSets.
MODULES_CLASS_FILTER="$SED \
-e 's/,$//' \
-e 's/;$//' \
@ -369,6 +372,14 @@ compare_general_files() {
$CAT $OTHER_DIR/$f | eval "$HTML_FILTER" > $OTHER_FILE &
$CAT $THIS_DIR/$f | eval "$HTML_FILTER" > $THIS_FILE &
wait
elif [ "$f" = "./lib/classlist" ]; then
# The classlist files may have some lines in random order
OTHER_FILE=$WORK_DIR/$f.other
THIS_FILE=$WORK_DIR/$f.this
$MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE)
$RM $OTHER_FILE $THIS_FILE
$CAT $OTHER_DIR/$f | $SORT > $OTHER_FILE
$CAT $THIS_DIR/$f | $SORT > $THIS_FILE
else
OTHER_FILE=$OTHER_DIR/$f
THIS_FILE=$THIS_DIR/$f
@ -651,7 +662,7 @@ compare_bin_file() {
OTHER_DIZ_FILE=${OTHER_FILE_BASE}.diz
else
# Some files, jli.dll, appears twice in the image but only one of
# thme has a diz file next to it.
# them has a diz file next to it.
OTHER_DIZ_FILE="$($FIND $OTHER_DIR -name $DIZ_NAME | $SED 1q)"
if [ ! -f "$OTHER_DIZ_FILE" ]; then
# As a last resort, look for diz file in the whole build output
@ -1335,6 +1346,24 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
OTHER_JDK="$OTHER/images/jdk"
OTHER_JRE="$OTHER/images/jre"
echo "Selecting jdk images for compare"
elif [ -d "$(ls -d $THIS/licensee-src/build/*/images/jdk)" ] \
&& [ -d "$(ls -d $OTHER/licensee-src/build/*/images/jdk)" ]
then
echo "Selecting licensee images for compare"
# Simply override the THIS and OTHER dir with the build dir from
# the nested licensee source build for the rest of the script
# execution.
OLD_THIS="$THIS"
OLD_OTHER="$OTHER"
THIS="$(ls -d $THIS/licensee-src/build/*)"
OTHER="$(ls -d $OTHER/licensee-src/build/*)"
THIS_JDK="$THIS/images/jdk"
THIS_JRE="$THIS/images/jre"
OTHER_JDK="$OTHER/images/jdk"
OTHER_JRE="$OTHER/images/jre"
# Rewrite the path to tools that are used from the build
JIMAGE="$(echo "$JIMAGE" | $SED "s|$OLD_THIS|$THIS|g")"
JAVAP="$(echo "$JAVAP" | $SED "s|$OLD_THIS|$THIS|g")"
else
echo "No common images found."
exit 1

View File

@ -254,7 +254,7 @@ var getJibProfilesProfiles = function (input, common) {
build_cpu: "x64",
dependencies: concat(common.dependencies, "devkit"),
configure_args: concat(common.configure_args, common.configure_args_32bit,
"--with-jvm-variants=minimal,client,server", "--with-zlib=system"),
"--with-jvm-variants=minimal,server", "--with-zlib=system"),
default_make_targets: common.default_make_targets
},
@ -295,8 +295,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "x86",
build_cpu: "x64",
dependencies: concat(common.dependencies, "devkit", "freetype"),
configure_args: concat(common.configure_args,
"--with-jvm-variants=client,server", common.configure_args_32bit),
configure_args: concat(common.configure_args, common.configure_args_32bit),
default_make_targets: common.default_make_targets
}
};

View File

@ -367,3 +367,4 @@ daf533920b1266603b5cbdab31908d2a931c5361 jdk-9+119
a39131aafc51a6fd8836e6ebe1b04458702ce7d6 jdk-9+122
e33a34cc551907617d8129c4faaf1a5a7e61d21c jdk-9+123
45121d5afb9d5bfadab75378572ad96832e0809e jdk-9+124
1d48e67d1b91eb9f72e49e69a4021edb85e357fc jdk-9+125

View File

@ -527,3 +527,4 @@ b64432bae5271735fd53300b2005b713e98ef411 jdk-9+114
af6b4ad908e732d23021f12e8322b204433d5cf6 jdk-9+122
75f81e1fecfb444f34f357295fe06af60e2762d9 jdk-9+123
479631362b4930be985245ea063d87d821a472eb jdk-9+124
bb640b49741af3f57f9994129934c46fc173219f jdk-9+125

View File

@ -153,6 +153,13 @@ else ifeq ($(OPENJDK_TARGET_OS), aix)
# mode, so don't optimize sharedRuntimeTrig.cpp at all.
BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := $(CXX_O_FLAG_NONE)
ifneq ($(DEBUG_LEVEL),slowdebug)
# Compiling jvmtiEnterTrace.cpp with full optimization needs more than 30min
# (mostly because of '-qhot=level=1' and the more than 1300 'log_trace' calls
# which cause a lot of template expansion).
BUILD_LIBJVM_jvmtiEnterTrace.cpp_OPTIMIZATION := LOW
endif
# Disable ELF decoder on AIX (AIX uses XCOFF).
JVM_EXCLUDE_PATTERNS += elf

View File

@ -12179,21 +12179,21 @@ instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlag
%}
%}
instruct rolI_rReg_Var_C_32(iRegLNoSp dst, iRegL src, iRegI shift, immI_32 c_32, rFlagsReg cr)
instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
%{
match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
expand %{
rolL_rReg(dst, src, shift, cr);
rolI_rReg(dst, src, shift, cr);
%}
%}
instruct rolI_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
%{
match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
expand %{
rolL_rReg(dst, src, shift, cr);
rolI_rReg(dst, src, shift, cr);
%}
%}
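These match rules recognize the shift/or rotate-left idiom; the fix makes the 32-bit forms use 32-bit operands (iRegINoSp/iRegI) and expand to rolI_rReg instead of the 64-bit rolL_rReg the old rules used. A small illustrative Java check of the idiom itself (the helper name is invented for this example):

```java
// Sketch of the rotate idiom these match rules recognize: for a 32-bit value,
// (x << s) | (x >>> (32 - s)) is a rotate-left by s, so the 32-bit pattern must
// expand to the 32-bit rolI_rReg rather than the 64-bit rolL_rReg.
public class RotateIdiom {
    static int rotlViaShifts(int x, int s) {
        return (x << s) | (x >>> (32 - s));   // the OrI/LShiftI/URShiftI/SubI shape
    }

    public static void main(String[] args) {
        int x = 0x12345678;
        for (int s = 1; s < 32; s++) {
            assert rotlViaShifts(x, s) == Integer.rotateLeft(x, s); // run with -ea
        }
        System.out.println("shift/or idiom matches Integer.rotateLeft");
    }
}
```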

View File

@ -944,8 +944,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register t = r5;
__ load_klass(t, r0);
__ ldrw(t, Address(t, Klass::access_flags_offset()));
__ tst(t, JVM_ACC_HAS_FINALIZER);
__ br(Assembler::NE, register_finalizer);
__ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
__ ret(lr);
__ bind(register_finalizer);
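This file and the interpreter/template files that follow apply the same rewrite: a tst against an access-flag mask followed by a conditional branch becomes a single tbnz/tbz on bit exact_log2(mask). That is only an equivalent transformation when the mask has exactly one bit set, which holds for the flags tested here (JVM_ACC_HAS_FINALIZER, JVM_ACC_STATIC, JVM_ACC_SYNCHRONIZED, the popframe bits). A small illustrative Java sketch of the equivalence (class and helper names are invented for this example):

```java
// Sketch of why the tst+br -> tbnz rewrite is safe for single-bit masks:
// "(flags & MASK) != 0" is the same as testing the one bit at position
// log2(MASK), which is exactly what tbnz/tbz do.
public class SingleBitTest {
    static boolean maskTest(int flags, int mask) {      // old tst + br(NE, ...)
        return (flags & mask) != 0;
    }

    static boolean bitTest(int flags, int mask) {       // new tbnz(reg, exact_log2(mask), ...)
        int bit = Integer.numberOfTrailingZeros(mask);   // exact_log2 for a power of two
        return ((flags >>> bit) & 1) != 0;
    }

    public static void main(String[] args) {
        int mask = 1 << 30;                              // stand-in for a single-bit access flag
        for (int flags : new int[] {0, mask, ~mask, -1}) {
            assert maskTest(flags, mask) == bitTest(flags, mask); // run with -ea
        }
        System.out.println("equivalent for single-bit masks");
    }
}
```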

View File

@ -93,10 +93,8 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
// This method is only called just after the call into the vm in
// call_VM_base, so the arg registers are available.
ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
tstw(rscratch1, JavaThread::popframe_pending_bit);
br(Assembler::EQ, L);
tstw(rscratch1, JavaThread::popframe_processing_bit);
br(Assembler::NE, L);
tbz(rscratch1, exact_log2(JavaThread::popframe_pending_bit), L);
tbnz(rscratch1, exact_log2(JavaThread::popframe_processing_bit), L);
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
@ -505,8 +503,7 @@ void InterpreterMacroAssembler::remove_activation(
// get method access flags
ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
ldr(r2, Address(r1, Method::access_flags_offset()));
tst(r2, JVM_ACC_SYNCHRONIZED);
br(Assembler::EQ, unlocked);
tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
@ -1582,8 +1579,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
// do. The unknown bit may have been
// set already but no need to check.
tst(obj, TypeEntries::type_unknown);
br(Assembler::NE, next); // already unknown. Nothing to do anymore.
tbnz(obj, exact_log2(TypeEntries::type_unknown), next);
// already unknown. Nothing to do anymore.
ldr(rscratch1, mdo_addr);
cbz(rscratch1, none);

View File

@ -710,7 +710,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_loop);
__ strb(zr, Address(start, count));
__ subs(count, count, 1);
__ br(Assembler::HS, L_loop);
__ br(Assembler::GE, L_loop);
}
break;
default:
@ -1299,7 +1299,7 @@ class StubGenerator: public StubCodeGenerator {
if (VerifyOops)
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::uxtw(exact_log2(size))));
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1);
}
__ leave();
@ -2002,9 +2002,9 @@ class StubGenerator: public StubCodeGenerator {
arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
rscratch2, L_failed);
__ lea(from, Address(src, src_pos, Address::lsl(3)));
__ lea(from, Address(src, src_pos, Address::lsl(LogBytesPerHeapOop)));
__ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ lea(to, Address(dst, dst_pos, Address::lsl(3)));
__ lea(to, Address(dst, dst_pos, Address::lsl(LogBytesPerHeapOop)));
__ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ movw(count, scratch_length); // length
__ BIND(L_plain_copy);
@ -2027,9 +2027,9 @@ class StubGenerator: public StubCodeGenerator {
__ load_klass(rscratch2_dst_klass, dst); // reload
// Marshal the base address arguments now, freeing registers.
__ lea(from, Address(src, src_pos, Address::lsl(3)));
__ lea(from, Address(src, src_pos, Address::lsl(LogBytesPerHeapOop)));
__ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ lea(to, Address(dst, dst_pos, Address::lsl(3)));
__ lea(to, Address(dst, dst_pos, Address::lsl(LogBytesPerHeapOop)));
__ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ movw(count, length); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
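Two of the hunks above replace a hard-coded Address::lsl(3) scale with Address::lsl(LogBytesPerHeapOop) when forming the source and destination addresses of an oop array copy. Assuming the usual HotSpot meaning of LogBytesPerHeapOop (2 when compressed oops are in use, 3 for full-width references), the old shift stepped 8 bytes per element even when heap oops are only 4 bytes wide. A hypothetical Java sketch of the addressing arithmetic, with an illustrative header size:

```java
// Sketch of the address arithmetic fixed above (assumes LogBytesPerHeapOop is
// 2 with compressed oops, 3 otherwise). Element i of an oop array lives at
// base + header + (i << LogBytesPerHeapOop); a hard-coded shift of 3 walks
// twice as far as it should when oops are compressed.
public class OopArrayAddressing {
    static long elementOffset(long headerBytes, long index, int logBytesPerHeapOop) {
        return headerBytes + (index << logBytesPerHeapOop);
    }

    public static void main(String[] args) {
        long header = 16;                                  // illustrative base_offset_in_bytes(T_OBJECT)
        System.out.println(elementOffset(header, 10, 2));  // compressed oops: 16 + 40 = 56
        System.out.println(elementOffset(header, 10, 3));  // uncompressed:    16 + 80 = 96
    }
}
```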

View File

@ -1242,8 +1242,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
{
Label L;
__ ldrw(t, Address(rmethod, Method::access_flags_offset()));
__ tst(t, JVM_ACC_STATIC);
__ br(Assembler::EQ, L);
__ tbz(t, exact_log2(JVM_ACC_STATIC), L);
// get mirror
__ load_mirror(t, rmethod);
// copy mirror into activation frame
@ -1435,8 +1434,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
{
Label L;
__ ldrw(t, Address(rmethod, Method::access_flags_offset()));
__ tst(t, JVM_ACC_SYNCHRONIZED);
__ br(Assembler::EQ, L);
__ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
// the code below should be shared with interpreter macro
// assembler implementation
{

View File

@ -2190,9 +2190,8 @@ void TemplateTable::_return(TosState state)
__ ldr(c_rarg1, aaddress(0));
__ load_klass(r3, c_rarg1);
__ ldrw(r3, Address(r3, Klass::access_flags_offset()));
__ tst(r3, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
__ br(Assembler::EQ, skip_register_finalizer);
__ tbz(r3, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

View File

@ -503,6 +503,10 @@ class Assembler : public AbstractAssembler {
LVSL_OPCODE = (31u << OPCODE_SHIFT | 6u << 1),
LVSR_OPCODE = (31u << OPCODE_SHIFT | 38u << 1),
// Vector-Scalar (VSX) instruction support.
LXVD2X_OPCODE = (31u << OPCODE_SHIFT | 844u << 1),
STXVD2X_OPCODE = (31u << OPCODE_SHIFT | 972u << 1),
// Vector Permute and Formatting
VPKPX_OPCODE = (4u << OPCODE_SHIFT | 782u ),
VPKSHSS_OPCODE = (4u << OPCODE_SHIFT | 398u ),
@ -1085,6 +1089,19 @@ class Assembler : public AbstractAssembler {
static int vrs( VectorRegister r) { return vrs(r->encoding());}
static int vrt( VectorRegister r) { return vrt(r->encoding());}
// Support Vector-Scalar (VSX) instructions.
static int vsra( int x) { return opp_u_field(x, 15, 11); }
static int vsrb( int x) { return opp_u_field(x, 20, 16); }
static int vsrc( int x) { return opp_u_field(x, 25, 21); }
static int vsrs( int x) { return opp_u_field(x, 10, 6); }
static int vsrt( int x) { return opp_u_field(x, 10, 6); }
static int vsra( VectorSRegister r) { return vsra(r->encoding());}
static int vsrb( VectorSRegister r) { return vsrb(r->encoding());}
static int vsrc( VectorSRegister r) { return vsrc(r->encoding());}
static int vsrs( VectorSRegister r) { return vsrs(r->encoding());}
static int vsrt( VectorSRegister r) { return vsrt(r->encoding());}
static int vsplt_uim( int x) { return opp_u_field(x, 15, 12); } // for vsplt* instructions
static int vsplti_sim(int x) { return opp_u_field(x, 15, 11); } // for vsplti* instructions
static int vsldoi_shb(int x) { return opp_u_field(x, 25, 22); } // for vsldoi instruction
@ -2069,6 +2086,10 @@ class Assembler : public AbstractAssembler {
inline void mtvscr( VectorRegister b);
inline void mfvscr( VectorRegister d);
// Vector-Scalar (VSX) instructions.
inline void lxvd2x( VectorSRegister d, Register a, Register b);
inline void stxvd2x( VectorSRegister d, Register a, Register b);
// AES (introduced with Power 8)
inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);

View File

@ -724,6 +724,10 @@ inline void Assembler::stvxl( VectorRegister d, Register s1, Register s2) { emit
inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit_int32( LVSL_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
// Vector-Scalar (VSX) instructions.
inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
inline void Assembler::vpkpx( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkswss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }

View File

@ -75,3 +75,14 @@ const char* VectorRegisterImpl::name() const {
};
return is_valid() ? names[encoding()] : "vnoreg";
}
const char* VectorSRegisterImpl::name() const {
const char* names[number_of_registers] = {
"VSR0", "VSR1", "VSR2", "VSR3", "VSR4", "VSR5", "VSR6", "VSR7",
"VSR8", "VSR9", "VSR10", "VSR11", "VSR12", "VSR13", "VSR14", "VSR15",
"VSR16", "VSR17", "VSR18", "VSR19", "VSR20", "VSR21", "VSR22", "VSR23",
"VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31"
};
return is_valid() ? names[encoding()] : "vsnoreg";
}

View File

@ -491,6 +491,106 @@ CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
#endif // DONT_USE_REGISTER_DEFINES
// Use VectorSRegister as a shortcut.
class VectorSRegisterImpl;
typedef VectorSRegisterImpl* VectorSRegister;
inline VectorSRegister as_VectorSRegister(int encoding) {
return (VectorSRegister)(intptr_t)encoding;
}
// The implementation of Vector-Scalar (VSX) registers on POWER architecture.
class VectorSRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
};
// construction
inline friend VectorSRegister as_VectorSRegister(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
// testers
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
const char* name() const;
};
// The Vector-Scalar (VSX) registers of the POWER architecture.
CONSTANT_REGISTER_DECLARATION(VectorSRegister, vsnoreg, (-1));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR0, ( 0));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR1, ( 1));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR2, ( 2));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR3, ( 3));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR4, ( 4));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR5, ( 5));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR6, ( 6));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR7, ( 7));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR8, ( 8));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR9, ( 9));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR10, (10));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR11, (11));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR12, (12));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR13, (13));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR14, (14));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR15, (15));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR16, (16));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR17, (17));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR18, (18));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR19, (19));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR20, (20));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR21, (21));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR22, (22));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR23, (23));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR24, (24));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR25, (25));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR26, (26));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR27, (27));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR28, (28));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR29, (29));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR30, (30));
CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
#ifndef DONT_USE_REGISTER_DEFINES
#define vsnoregi ((VectorSRegister)(vsnoreg_VectorSRegisterEnumValue))
#define VSR0 ((VectorSRegister)( VSR0_VectorSRegisterEnumValue))
#define VSR1 ((VectorSRegister)( VSR1_VectorSRegisterEnumValue))
#define VSR2 ((VectorSRegister)( VSR2_VectorSRegisterEnumValue))
#define VSR3 ((VectorSRegister)( VSR3_VectorSRegisterEnumValue))
#define VSR4 ((VectorSRegister)( VSR4_VectorSRegisterEnumValue))
#define VSR5 ((VectorSRegister)( VSR5_VectorSRegisterEnumValue))
#define VSR6 ((VectorSRegister)( VSR6_VectorSRegisterEnumValue))
#define VSR7 ((VectorSRegister)( VSR7_VectorSRegisterEnumValue))
#define VSR8 ((VectorSRegister)( VSR8_VectorSRegisterEnumValue))
#define VSR9 ((VectorSRegister)( VSR9_VectorSRegisterEnumValue))
#define VSR10 ((VectorSRegister)( VSR10_VectorSRegisterEnumValue))
#define VSR11 ((VectorSRegister)( VSR11_VectorSRegisterEnumValue))
#define VSR12 ((VectorSRegister)( VSR12_VectorSRegisterEnumValue))
#define VSR13 ((VectorSRegister)( VSR13_VectorSRegisterEnumValue))
#define VSR14 ((VectorSRegister)( VSR14_VectorSRegisterEnumValue))
#define VSR15 ((VectorSRegister)( VSR15_VectorSRegisterEnumValue))
#define VSR16 ((VectorSRegister)( VSR16_VectorSRegisterEnumValue))
#define VSR17 ((VectorSRegister)( VSR17_VectorSRegisterEnumValue))
#define VSR18 ((VectorSRegister)( VSR18_VectorSRegisterEnumValue))
#define VSR19 ((VectorSRegister)( VSR19_VectorSRegisterEnumValue))
#define VSR20 ((VectorSRegister)( VSR20_VectorSRegisterEnumValue))
#define VSR21 ((VectorSRegister)( VSR21_VectorSRegisterEnumValue))
#define VSR22 ((VectorSRegister)( VSR22_VectorSRegisterEnumValue))
#define VSR23 ((VectorSRegister)( VSR23_VectorSRegisterEnumValue))
#define VSR24 ((VectorSRegister)( VSR24_VectorSRegisterEnumValue))
#define VSR25 ((VectorSRegister)( VSR25_VectorSRegisterEnumValue))
#define VSR26 ((VectorSRegister)( VSR26_VectorSRegisterEnumValue))
#define VSR27 ((VectorSRegister)( VSR27_VectorSRegisterEnumValue))
#define VSR28 ((VectorSRegister)( VSR28_VectorSRegisterEnumValue))
#define VSR29 ((VectorSRegister)( VSR29_VectorSRegisterEnumValue))
#define VSR30 ((VectorSRegister)( VSR30_VectorSRegisterEnumValue))
#define VSR31 ((VectorSRegister)( VSR31_VectorSRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Maximum number of incoming arguments that can be passed in i registers.
const int PPC_ARGS_IN_REGS_NUM = 8;

View File

@ -1341,10 +1341,13 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R9_ARG7;
VectorSRegister tmp_vsr1 = VSR1;
VectorSRegister tmp_vsr2 = VSR2;
address start = __ function_entry();
assert_positive_int(R5_ARG3);
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
// don't try anything fancy if arrays don't have many elements
__ li(tmp3, 0);
@ -1403,22 +1406,60 @@ class StubGenerator: public StubCodeGenerator {
__ andi_(R5_ARG3, R5_ARG3, 15);
__ mtctr(tmp1);
__ bind(l_8);
// Use unrolled version for mass copying (copy 16 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
// Therefore, the following sequence is made for the good of both.
__ ld(tmp1, 0, R3_ARG1);
__ ld(tmp2, 8, R3_ARG1);
__ ld(tmp3, 16, R3_ARG1);
__ ld(tmp4, 24, R3_ARG1);
__ std(tmp1, 0, R4_ARG2);
__ std(tmp2, 8, R4_ARG2);
__ std(tmp3, 16, R4_ARG2);
__ std(tmp4, 24, R4_ARG2);
__ addi(R3_ARG1, R3_ARG1, 32);
__ addi(R4_ARG2, R4_ARG2, 32);
__ bdnz(l_8);
}
if (!VM_Version::has_vsx()) {
__ bind(l_8);
// Use unrolled version for mass copying (copy 16 elements a time).
// Load feeding store gets zero latency on Power6, however not on Power5.
// Therefore, the following sequence is made for the good of both.
__ ld(tmp1, 0, R3_ARG1);
__ ld(tmp2, 8, R3_ARG1);
__ ld(tmp3, 16, R3_ARG1);
__ ld(tmp4, 24, R3_ARG1);
__ std(tmp1, 0, R4_ARG2);
__ std(tmp2, 8, R4_ARG2);
__ std(tmp3, 16, R4_ARG2);
__ std(tmp4, 24, R4_ARG2);
__ addi(R3_ARG1, R3_ARG1, 32);
__ addi(R4_ARG2, R4_ARG2, 32);
__ bdnz(l_8);
} else { // Processor supports VSX, so use it to mass copy.
// Prefetch src data into L2 cache.
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. It's not aligned 16-byte
// as loop contains < 8 instructions that fit inside a single
// i-cache sector.
__ align(32);
__ bind(l_9);
// Use loop with VSX load/store instructions to
// copy 16 elements a time.
__ lxvd2x(tmp_vsr1, 0, R3_ARG1); // Load from src.
__ stxvd2x(tmp_vsr1, 0, R4_ARG2); // Store to dst.
__ lxvd2x(tmp_vsr2, R3_ARG1, tmp1); // Load from src + 16.
__ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
__ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32.
__ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32.
__ bdnz(l_9); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
}
} // FasterArrayCopy
__ bind(l_6);
// copy 2 elements at a time

View File

@ -38,7 +38,7 @@
# include <sys/sysinfo.h>
bool VM_Version::_is_determine_features_test_running = false;
uint64_t VM_Version::_dscr_val = 0;
#define MSG(flag) \
if (flag && !FLAG_IS_DEFAULT(flag)) \
@ -111,7 +111,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s",
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@ -125,7 +125,8 @@ void VM_Version::initialize() {
(has_vcipher() ? " aes" : ""),
(has_vpmsumb() ? " vpmsumb" : ""),
(has_tcheck() ? " tcheck" : ""),
(has_mfdscr() ? " mfdscr" : "")
(has_mfdscr() ? " mfdscr" : ""),
(has_vsx() ? " vsx" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@ -643,6 +644,7 @@ void VM_Version::determine_features() {
a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
a->tcheck(0); // code[12] -> tcheck
a->mfdscr(R0); // code[13] -> mfdscr
a->lxvd2x(VSR0, 0, R3_ARG1); // code[14] -> vsx
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@ -691,6 +693,7 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= vpmsumb_m;
if (code[feature_cntr++]) features |= tcheck_m;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= vsx_m;
// Print the detection code.
if (PrintAssembly) {
@ -733,31 +736,31 @@ void VM_Version::config_dscr() {
}
// Apply the configuration if needed.
uint64_t dscr_val = (*get_dscr)();
_dscr_val = (*get_dscr)();
if (Verbose) {
tty->print_cr("dscr value was 0x%lx" , dscr_val);
tty->print_cr("dscr value was 0x%lx" , _dscr_val);
}
bool change_requested = false;
if (DSCR_PPC64 != (uintx)-1) {
dscr_val = DSCR_PPC64;
_dscr_val = DSCR_PPC64;
change_requested = true;
}
if (DSCR_DPFD_PPC64 <= 7) {
uint64_t mask = 0x7;
if ((dscr_val & mask) != DSCR_DPFD_PPC64) {
dscr_val = (dscr_val & ~mask) | (DSCR_DPFD_PPC64);
if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
_dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
change_requested = true;
}
}
if (DSCR_URG_PPC64 <= 7) {
uint64_t mask = 0x7 << 6;
if ((dscr_val & mask) != DSCR_DPFD_PPC64 << 6) {
dscr_val = (dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
if ((_dscr_val & mask) != DSCR_DPFD_PPC64 << 6) {
_dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
change_requested = true;
}
}
if (change_requested) {
(*set_dscr)(dscr_val);
(*set_dscr)(_dscr_val);
if (Verbose) {
tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)());
}

View File

@ -46,6 +46,7 @@ protected:
vpmsumb,
tcheck,
mfdscr,
vsx,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@ -64,6 +65,7 @@ protected:
vpmsumb_m = (1 << vpmsumb),
tcheck_m = (1 << tcheck ),
mfdscr_m = (1 << mfdscr ),
vsx_m = (1 << vsx ),
all_features_m = (unsigned long)-1
};
@ -97,10 +99,14 @@ public:
static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
static bool has_tcheck() { return (_features & tcheck_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; }
static bool has_vsx() { return (_features & vsx_m) != 0; }
// Assembler testing
static void allow_all();
static void revert();
// POWER 8: DSCR current value.
static uint64_t _dscr_val;
};
#endif // CPU_PPC_VM_VM_VERSION_PPC_HPP

View File

@ -68,7 +68,6 @@ void LinearScan::allocate_fpu_stack() {
if (b->number_of_preds() > 1) {
int id = b->first_lir_instruction_id();
ResourceBitMap regs(FrameMap::nof_fpu_regs);
regs.clear();
iw.walk_to(id); // walk after the first instruction (always a label) of the block
assert(iw.current_position() == id, "did not walk completely to id");

View File

@ -7046,7 +7046,6 @@ void MacroAssembler::string_indexofC8(Register str1, Register str2,
int ae) {
ShortBranchVerifier sbv(this);
assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
// This method uses the pcmpestri instruction with bound registers
@ -7225,7 +7224,6 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
int ae) {
ShortBranchVerifier sbv(this);
assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
//
@ -7543,7 +7541,6 @@ void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register
XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp) {
ShortBranchVerifier sbv(this);
assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
int stride = 8;
@ -7723,7 +7720,6 @@ void MacroAssembler::string_compare(Register str1, Register str2,
}
if (UseAVX >= 2 && UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
Label COMPARE_WIDE_VECTORS_LOOP_AVX2;
@ -7891,7 +7887,6 @@ void MacroAssembler::string_compare(Register str1, Register str2,
bind(COMPARE_SMALL_STR);
} else if (UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
int pcmpmask = 0x19;
// Setup to compare 8-char (16-byte) vectors,
@ -8179,7 +8174,6 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
// Fallthru to tail compare
}
else if (UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be for SSE4.2 intrinsics to be available");
// With SSE4.2, use double quad vector compare
Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
@ -8383,7 +8377,6 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar
movl(limit, result);
// Fallthru to tail compare
} else if (UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
// With SSE4.2, use double quad vector compare
Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
@ -8747,7 +8740,6 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
negptr(len);
if (UseSSE42Intrinsics || UseAVX >= 2) {
assert(UseSSE42Intrinsics ? UseSSE >= 4 : true, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
@ -10159,7 +10151,13 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
movdqa(xmm1, Address(buf, 0));
movdl(rax, xmm1);
xorl(crc, rax);
pinsrd(xmm1, crc, 0);
if (VM_Version::supports_sse4_1()) {
pinsrd(xmm1, crc, 0);
} else {
pinsrw(xmm1, crc, 0);
shrl(crc, 16);
pinsrw(xmm1, crc, 1);
}
addptr(buf, 16);
subl(len, 4); // len > 0
jcc(Assembler::less, L_fold_tail);
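pinsrd (insert a 32-bit dword into an XMM register) is an SSE4.1 instruction, so the fallback above rebuilds the same dword from two 16-bit pinsrw inserts, which predate SSE4.1: the low half of crc goes into word 0, and after shrl by 16 the high half goes into word 1. A tiny illustrative Java check that the two halves reassemble the original value:

```java
// Sketch of the pinsrw fallback: inserting the low 16 bits of crc at word 0
// and the high 16 bits at word 1 writes the same 32-bit dword that a single
// pinsrd would have written.
public class CrcWordInsert {
    public static void main(String[] args) {
        int crc = 0xCAFEBABE;
        int word0 = crc & 0xFFFF;            // pinsrw xmm1, crc, 0
        int word1 = (crc >>> 16) & 0xFFFF;   // shrl crc, 16; pinsrw xmm1, crc, 1
        int rebuilt = (word1 << 16) | word0; // dword 0 of xmm1 as pinsrd would set it
        System.out.printf("0x%08X == 0x%08X : %b%n", crc, rebuilt, crc == rebuilt);
    }
}
```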
@ -10881,7 +10879,6 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
clear_vector_masking(); // closing of the stub context for programming mask registers
}
if (UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
Label copy_32_loop, copy_16, copy_tail;
bind(below_threshold);
@ -11045,7 +11042,6 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
clear_vector_masking(); // closing of the stub context for programming mask registers
}
if (UseSSE42Intrinsics) {
assert(UseSSE >= 4, "SSE4 must be enabled for SSE4.2 intrinsics to be available");
Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
movl(tmp2, len);

View File

@ -658,7 +658,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
} else {
if(supports_sse4_1() && UseSSE >= 4) {
if(supports_sse4_1()) {
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
}
@ -970,7 +970,7 @@ void VM_Version::get_processor_features() {
UseXmmI2D = false;
}
}
if (supports_sse4_2() && UseSSE >= 4) {
if (supports_sse4_2()) {
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
}
@ -1050,7 +1050,7 @@ void VM_Version::get_processor_features() {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
}
}
if (supports_sse4_2() && UseSSE >= 4) {
if (supports_sse4_2()) {
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
}

View File

@ -1718,7 +1718,7 @@ const bool Matcher::match_rule_supported(int opcode) {
ret_value = false;
break;
case Op_StrIndexOfChar:
if (!(UseSSE > 4))
if (!UseSSE42Intrinsics)
ret_value = false;
break;
case Op_OnSpinWait:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,6 +96,8 @@ public class Universe {
narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
narrowKlassBaseField = type.getAddressField("_narrow_klass._base");
narrowKlassShiftField = type.getCIntegerField("_narrow_klass._shift");
UniverseExt.initialize(heapConstructor);
}
public Universe() {

View File

@ -1,12 +1,10 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -21,24 +19,13 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.tools.jlink.plugin;
package sun.jvm.hotspot.memory;
/**
* Implement this interface to develop a Transformer plugin.
* TransformerPlugin are called during image creation. This kind of plugin aims to
* modify the content of the runtime image.
*/
public interface TransformerPlugin extends Plugin {
/**
* Visit the content of the modules that are composing the image.
*
* @param in Read only content.
* @param out The pool to fill with content. This pool must contain
* the result of the visit.
*
* @throws PluginException
*/
public void visit(ModulePool in, ModulePool out);
import sun.jvm.hotspot.runtime.*;
public class UniverseExt {
public static void initialize(VirtualConstructor heapConstructor) { }
}

View File

@ -31,10 +31,13 @@ import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.TreeMap;
import jdk.internal.misc.VM;
@ -213,7 +216,22 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
private final Map<Class<? extends Architecture>, JVMCIBackend> backends = new HashMap<>();
private final Iterable<HotSpotVMEventListener> vmEventListeners;
private volatile List<HotSpotVMEventListener> vmEventListeners;
private Iterable<HotSpotVMEventListener> getVmEventListeners() {
if (vmEventListeners == null) {
synchronized (this) {
if (vmEventListeners == null) {
List<HotSpotVMEventListener> listeners = new ArrayList<>();
for (HotSpotVMEventListener vmEventListener : ServiceLoader.load(HotSpotVMEventListener.class)) {
listeners.add(vmEventListener);
}
vmEventListeners = listeners;
}
}
}
return vmEventListeners;
}
/**
* Stores the result of {@link HotSpotJVMCICompilerFactory#getTrivialPrefixes()} so that it can
@ -240,8 +258,6 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
hostBackend = registerBackend(factory.createJVMCIBackend(this, null));
}
vmEventListeners = Services.load(HotSpotVMEventListener.class);
metaAccessContext = new HotSpotJVMCIMetaAccessContext();
boolean printFlags = Option.PrintFlags.getBoolean();
@ -370,7 +386,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
*/
@SuppressWarnings({"unused"})
private void shutdown() throws Exception {
for (HotSpotVMEventListener vmEventListener : vmEventListeners) {
for (HotSpotVMEventListener vmEventListener : getVmEventListeners()) {
vmEventListener.notifyShutdown();
}
}
@ -382,7 +398,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
*/
@SuppressWarnings({"unused"})
private void bootstrapFinished() throws Exception {
for (HotSpotVMEventListener vmEventListener : vmEventListeners) {
for (HotSpotVMEventListener vmEventListener : getVmEventListeners()) {
vmEventListener.notifyBootstrapFinished();
}
}
@ -395,7 +411,7 @@ public final class HotSpotJVMCIRuntime implements HotSpotJVMCIRuntimeProvider {
* @param compiledCode
*/
void notifyInstall(HotSpotCodeCacheProvider hotSpotCodeCacheProvider, InstalledCode installedCode, CompiledCode compiledCode) {
for (HotSpotVMEventListener vmEventListener : vmEventListeners) {
for (HotSpotVMEventListener vmEventListener : getVmEventListeners()) {
vmEventListener.notifyInstall(hotSpotCodeCacheProvider, installedCode, compiledCode);
}
}
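The JVMCI change above stops loading HotSpotVMEventListener providers eagerly in the constructor and instead builds the list on first use with the double-checked locking idiom: the field is volatile, checked once without the lock, and checked again inside the synchronized block before the fully built list is published. A generic, self-contained Java sketch of the same idiom (not the JVMCI class itself; the class name here is made up):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

// Minimal, generic sketch of the lazy-initialization idiom used above:
// double-checked locking over a volatile field, so the ServiceLoader scan only
// happens on first use and the published list is safely visible to all threads.
public class LazyListeners<T> {
    private final Class<T> service;
    private volatile List<T> listeners;          // volatile is what makes the idiom safe

    public LazyListeners(Class<T> service) {
        this.service = service;
    }

    public List<T> get() {
        List<T> result = listeners;
        if (result == null) {                    // first check, no lock taken
            synchronized (this) {
                result = listeners;
                if (result == null) {            // second check, under the lock
                    result = new ArrayList<>();
                    for (T t : ServiceLoader.load(service)) {
                        result.add(t);
                    }
                    listeners = result;          // publish the fully built list
                }
            }
        }
        return result;
    }
}
```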

View File

@ -144,8 +144,18 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
return ret;
}
@Override
public JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant baseConstant, long displacement) {
/**
* Reads a value of this kind using a base address and a displacement. No bounds checking or
* type checking is performed. Returns {@code null} if the value is not available at this point.
*
* @param baseConstant the base address from which the value is read.
* @param displacement the displacement within the object in bytes
* @return the read value encapsulated in a {@link JavaConstant} object, or {@code null} if the
* value cannot be read.
* @throws IllegalArgumentException if {@code kind} is {@code null}, {@link JavaKind#Void}, not
* {@link JavaKind#Object} or not {@linkplain JavaKind#isPrimitive() primitive} kind
*/
JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant baseConstant, long displacement) {
if (kind == null) {
throw new IllegalArgumentException("null JavaKind");
}

View File

@ -27,19 +27,6 @@ package jdk.vm.ci.meta;
*/
public interface MemoryAccessProvider {
/**
* Reads a value of this kind using a base address and a displacement. No bounds checking or
* type checking is performed. Returns {@code null} if the value is not available at this point.
*
* @param base the base address from which the value is read.
* @param displacement the displacement within the object in bytes
* @return the read value encapsulated in a {@link JavaConstant} object, or {@code null} if the
* value cannot be read.
* @throws IllegalArgumentException if {@code kind} is {@code null}, {@link JavaKind#Void}, not
* {@link JavaKind#Object} or not {@linkplain JavaKind#isPrimitive() primitive} kind
*/
JavaConstant readUnsafeConstant(JavaKind kind, JavaConstant base, long displacement) throws IllegalArgumentException;
/**
* Reads a primitive value using a base address and a displacement.
*

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,17 +22,49 @@
*
*/
// This is only a stub. Will flesh out later when/if we add further support
// for PASE.
#include "libo4.hpp"
bool libo4::init() { return false; }
void libo4::cleanup() {}
bool libo4::get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free) {
// global variables
// whether initialization worked
static bool g_initialized = false;
//////////////////////////
// class libo4 - impl //
//////////////////////////
bool libo4::init() {
if (g_initialized) {
return true;
}
return false;
}
bool libo4::get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15) { return false; }
bool libo4::realpath (const char* file_name, char* resolved_name, int resolved_name_len) { return false; }
void libo4::cleanup() {
if (g_initialized) {
g_initialized = false;
}
}
bool libo4::get_memory_info(unsigned long long* p_virt_total,
unsigned long long* p_real_total,
unsigned long long* p_real_free,
unsigned long long* p_pgsp_total,
unsigned long long* p_pgsp_free) {
return false;
}
bool libo4::get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15) {
return false;
}
bool libo4::realpath(const char* file_name, char* resolved_name,
int resolved_name_len) {
return false;
}
bool libo4::removeEscapeMessageFromJoblogByContext(const void* context) {
// Note: no tracing here! We run in signal handling context
return false;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,56 +22,69 @@
*
*/
// A C++ wrapper around the libo4 porting library. The libo4 porting library
// is a set of bridge functions into native AS/400 functionality.
// Class libo4 is a C++ wrapper around the libo4 porting library. It handles
// basic stuff like dynamic loading, library initialization etc.
// The libo4 porting library is a set of functions that bridge from the AIX
// runtime environment on OS/400 (aka PASE layer) into native OS/400
// functionality (aka ILE layer) to close some functional gaps that exist in
// the PASE layer.
#ifndef OS_AIX_VM_LIBO4_HPP
#define OS_AIX_VM_LIBO4_HPP
class libo4 {
public:
// Initialize the libo4 porting library.
// Returns true if succeeded, false if error.
static bool init();
// cleanup of the libo4 porting library.
// Triggers cleanup of the libo4 porting library.
static void cleanup();
// returns a number of memory statistics from the
// AS/400.
// Returns a number of memory statistics from OS/400.
//
// See libo4.h for details on this API.
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free);
static bool get_memory_info(unsigned long long* p_virt_total,
unsigned long long* p_real_total,
unsigned long long* p_real_free,
unsigned long long* p_pgsp_total,
unsigned long long* p_pgsp_free);
// returns information about system load
// Returns information about system load
// (similar to "loadavg()" under other Unices)
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15);
// this is a replacement for the "realpath()" API which does not really work
// on PASE
// See libo4.h for details on this API.
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool realpath (const char* file_name,
char* resolved_name, int resolved_name_len);
static bool get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15);
// This is a replacement for the "realpath()" API which does not really work
// in PASE together with the (case insensitive but case preserving)
// filesystem on OS/400.
//
// See libo4.h for details on this API.
//
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool realpath(const char* file_name, char* resolved_name,
int resolved_name_len);
// Call libo4_RemoveEscapeMessageFromJoblogByContext API to remove messages
// from the OS/400 job log.
//
// See libo4.h for details on this API.
static bool removeEscapeMessageFromJoblogByContext(const void* context);
};
#endif // OS_AIX_VM_LIBO4_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -180,10 +180,12 @@ bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
memset (&psct, '\0', sizeof(psct));
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(PERFSTAT_CPU_TOTAL_T_LATEST), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_71), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
trcVerbose("perfstat_cpu_total() failed (errno=%d)", errno);
return false;
}
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -337,10 +337,109 @@ typedef struct { /* global cpu information AIX 7.1 */
int spurrflag; /* set if running in spurr mode */
u_longlong_t version; /* version number (1, 2, etc.,) */
/* >>>>> END OF STRUCTURE DEFINITION <<<<< */
#define CURR_VERSION_CPU_TOTAL 1 /* Incremented by one for every new release *
/* #define CURR_VERSION_CPU_TOTAL 1 Incremented by one for every new release *
* of perfstat_cpu_total_t data structure */
} perfstat_cpu_total_t_71;
typedef struct { /* global cpu information AIX 7.2 / 6.1 TL6 (see oslevel -r) */
int ncpus; /* number of active logical processors */
int ncpus_cfg; /* number of configured processors */
char description[IDENTIFIER_LENGTH]; /* processor description (type/official name) */
u_longlong_t processorHZ; /* processor speed in Hz */
u_longlong_t user; /* raw total number of clock ticks spent in user mode */
u_longlong_t sys; /* raw total number of clock ticks spent in system mode */
u_longlong_t idle; /* raw total number of clock ticks spent idle */
u_longlong_t wait; /* raw total number of clock ticks spent waiting for I/O */
u_longlong_t pswitch; /* number of process switches (change in currently running process) */
u_longlong_t syscall; /* number of system calls executed */
u_longlong_t sysread; /* number of read system calls executed */
u_longlong_t syswrite; /* number of write system calls executed */
u_longlong_t sysfork; /* number of forks system calls executed */
u_longlong_t sysexec; /* number of execs system calls executed */
u_longlong_t readch; /* number of characters transferred with read system call */
u_longlong_t writech; /* number of characters transferred with write system call */
u_longlong_t devintrs; /* number of device interrupts */
u_longlong_t softintrs; /* number of software interrupts */
time_t lbolt; /* number of ticks since last reboot */
u_longlong_t loadavg[3]; /* (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes. */
/* To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
u_longlong_t runque; /* length of the run queue (processes ready) */
u_longlong_t swpque; /* length of the swap queue (processes waiting to be paged in) */
u_longlong_t bread; /* number of blocks read */
u_longlong_t bwrite; /* number of blocks written */
u_longlong_t lread; /* number of logical read requests */
u_longlong_t lwrite; /* number of logical write requests */
u_longlong_t phread; /* number of physical reads (reads on raw devices) */
u_longlong_t phwrite; /* number of physical writes (writes on raw devices) */
u_longlong_t runocc; /* updated whenever runque is updated, i.e. the runqueue is occupied.
* This can be used to compute the simple average of ready processes */
u_longlong_t swpocc; /* updated whenever swpque is updated. i.e. the swpqueue is occupied.
* This can be used to compute the simple average processes waiting to be paged in */
u_longlong_t iget; /* number of inode lookups */
u_longlong_t namei; /* number of vnode lookup from a path name */
u_longlong_t dirblk; /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
u_longlong_t msg; /* number of IPC message operations */
u_longlong_t sema; /* number of IPC semaphore operations */
u_longlong_t rcvint; /* number of tty receive interrupts */
u_longlong_t xmtint; /* number of tty transmit interrupts */
u_longlong_t mdmint; /* number of modem interrupts */
u_longlong_t tty_rawinch; /* number of raw input characters */
u_longlong_t tty_caninch; /* number of canonical input characters (always zero) */
u_longlong_t tty_rawoutch;/* number of raw output characters */
u_longlong_t ksched; /* number of kernel processes created */
u_longlong_t koverf; /* kernel process creation attempts where:
* -the user has forked to their maximum limit
* -the configuration limit of processes has been reached */
u_longlong_t kexit; /* number of kernel processes that became zombies */
u_longlong_t rbread; /* number of remote read requests */
u_longlong_t rcread; /* number of cached remote reads */
u_longlong_t rbwrt; /* number of remote writes */
u_longlong_t rcwrt; /* number of cached remote writes */
u_longlong_t traps; /* number of traps */
int ncpus_high; /* index of highest processor online */
u_longlong_t puser; /* raw number of physical processor tics in user mode */
u_longlong_t psys; /* raw number of physical processor tics in system mode */
u_longlong_t pidle; /* raw number of physical processor tics idle */
u_longlong_t pwait; /* raw number of physical processor tics waiting for I/O */
u_longlong_t decrintrs; /* number of decrementer tics interrupts */
u_longlong_t mpcrintrs; /* number of mpc's received interrupts */
u_longlong_t mpcsintrs; /* number of mpc's sent interrupts */
u_longlong_t phantintrs; /* number of phantom interrupts */
u_longlong_t idle_donated_purr; /* number of idle cycles donated by a dedicated partition enabled for donation */
u_longlong_t idle_donated_spurr;/* number of idle spurr cycles donated by a dedicated partition enabled for donation */
u_longlong_t busy_donated_purr; /* number of busy cycles donated by a dedicated partition enabled for donation */
u_longlong_t busy_donated_spurr;/* number of busy spurr cycles donated by a dedicated partition enabled for donation */
u_longlong_t idle_stolen_purr; /* number of idle cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t idle_stolen_spurr; /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t busy_stolen_purr; /* number of busy cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t busy_stolen_spurr; /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
short iowait; /* number of processes that are asleep waiting for buffered I/O */
short physio; /* number of processes waiting for raw I/O */
longlong_t twait; /* number of threads that are waiting for filesystem direct(cio) */
u_longlong_t hpi; /* number of hypervisor page-ins */
u_longlong_t hpit; /* Time spent in hypervisor page-ins (in nanoseconds) */
u_longlong_t puser_spurr; /* number of spurr cycles spent in user mode */
u_longlong_t psys_spurr; /* number of spurr cycles spent in kernel mode */
u_longlong_t pidle_spurr; /* number of spurr cycles spent in idle mode */
u_longlong_t pwait_spurr; /* number of spurr cycles spent in wait mode */
int spurrflag; /* set if running in spurr mode */
u_longlong_t version; /* version number (1, 2, etc.,) */
u_longlong_t tb_last; /* time base counter */
u_longlong_t purr_coalescing; /* If the calling partition is
* authorized to see pool wide statistics then
* PURR cycles consumed to coalesce data
* else set to zero.*/
u_longlong_t spurr_coalescing; /* If the calling partition is
* authorized to see pool wide statistics then
* SPURR cycles consumed to coalesce data
* else set to zero.*/
/* >>>>> END OF STRUCTURE DEFINITION <<<<< */
#define CURR_VERSION_CPU_TOTAL 2 /* Incremented by one for every new release *
* of perfstat_cpu_total_t data structure */
} perfstat_cpu_total_t_72;
typedef union {
uint w;
struct {
@ -756,7 +855,7 @@ typedef struct { /* WPAR identifier */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define PERFSTAT_PARTITON_TOTAL_T_LATEST perfstat_partition_total_t_71_1/* latest perfstat_partition_total_t structure */
#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_71 /* latest perfstat_cpu_total_t structure */
#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_72 /* latest perfstat_cpu_total_t structure */
#define PERFSTAT_WPAR_TOTAL_T_LATEST perfstat_wpar_total_t_71 /* latest perfstat_wpar_total_t structure */
class libperfstat {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -264,7 +264,7 @@ void Canonicalizer::do_LoadIndexed (LoadIndexed* x) {
assert(array == NULL || FoldStableValues, "not enabled");
// Constant fold loads from stable arrays.
if (array != NULL && index != NULL) {
if (!x->mismatched() && array != NULL && index != NULL) {
jint idx = index->value();
if (idx < 0 || idx >= array->value()->length()) {
// Leave the load as is. The range check will handle it.
@ -310,8 +310,6 @@ void Canonicalizer::do_StoreIndexed (StoreIndexed* x) {
return;
}
}
}

View File

@ -4227,11 +4227,11 @@ void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
Value index = args->at(1);
if (is_store) {
Value value = args->at(2);
Instruction* store = append(new StoreIndexed(array, index, NULL, T_CHAR, value, state_before, false));
Instruction* store = append(new StoreIndexed(array, index, NULL, T_CHAR, value, state_before, false, true));
store->set_flag(Instruction::NeedsRangeCheckFlag, false);
_memory->store_value(value);
} else {
Instruction* load = append(new LoadIndexed(array, index, NULL, T_CHAR, state_before));
Instruction* load = append(new LoadIndexed(array, index, NULL, T_CHAR, state_before, true));
load->set_flag(Instruction::NeedsRangeCheckFlag, false);
push(load->type(), load);
}

View File

@ -147,10 +147,8 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
_wrote_volatile = false;
_start = NULL;
if (osr_bci == -1) {
_requires_phi_function.clear();
} else {
// selective creation of phi functions is not possible in osr-methods
if (osr_bci != -1) {
// selective creation of phi functions is not possible in osr-methods
_requires_phi_function.set_range(0, method->max_locals());
}
@ -540,7 +538,6 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
{
TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));
init_visited();
count_edges(start_block, NULL);
if (compilation()->is_profiling()) {
@ -646,7 +643,6 @@ void ComputeLinearScanOrder::mark_loops() {
TRACE_LINEAR_SCAN(3, tty->print_cr("----- marking loops"));
_loop_map = BitMap2D(_num_loops, _max_block_id);
_loop_map.clear();
for (int i = _loop_end_blocks.length() - 1; i >= 0; i--) {
BlockBegin* loop_end = _loop_end_blocks.at(i);

View File

@ -912,14 +912,16 @@ BASE(AccessIndexed, AccessArray)
Value _index;
Value _length;
BasicType _elt_type;
bool _mismatched;
public:
// creation
AccessIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before)
AccessIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before, bool mismatched)
: AccessArray(as_ValueType(elt_type), array, state_before)
, _index(index)
, _length(length)
, _elt_type(elt_type)
, _mismatched(mismatched)
{
set_flag(Instruction::NeedsRangeCheckFlag, true);
ASSERT_VALUES
@ -929,6 +931,7 @@ BASE(AccessIndexed, AccessArray)
Value index() const { return _index; }
Value length() const { return _length; }
BasicType elt_type() const { return _elt_type; }
bool mismatched() const { return _mismatched; }
void clear_length() { _length = NULL; }
// perform elimination of range checks involving constants
@ -945,8 +948,8 @@ LEAF(LoadIndexed, AccessIndexed)
public:
// creation
LoadIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before)
: AccessIndexed(array, index, length, elt_type, state_before)
LoadIndexed(Value array, Value index, Value length, BasicType elt_type, ValueStack* state_before, bool mismatched = false)
: AccessIndexed(array, index, length, elt_type, state_before, mismatched)
, _explicit_null_check(NULL) {}
// accessors
@ -974,8 +977,9 @@ LEAF(StoreIndexed, AccessIndexed)
public:
// creation
StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* state_before, bool check_boolean)
: AccessIndexed(array, index, length, elt_type, state_before)
StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* state_before,
bool check_boolean, bool mismatched = false)
: AccessIndexed(array, index, length, elt_type, state_before, mismatched)
, _value(value), _profiled_method(NULL), _profiled_bci(0), _check_boolean(check_boolean)
{
set_flag(NeedsWriteBarrierFlag, (as_ValueType(elt_type)->is_object()));
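Taken together with the Canonicalizer and GraphBuilder hunks above, the new trailing parameter marks indexed accesses whose access type differs from the array's declared element type, and such loads are no longer constant-folded. A condensed restatement of the call sites shown above (illustrative, not new code in the patch):

// Mismatched char-sized access created by the char-access intrinsic:
Instruction* load  = new LoadIndexed(array, index, NULL, T_CHAR, state_before, /* mismatched */ true);
Instruction* store = new StoreIndexed(array, index, NULL, T_CHAR, value, state_before,
                                      /* check_boolean */ false, /* mismatched */ true);
// Canonicalizer::do_LoadIndexed() now guards constant folding with !x->mismatched().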

View File

@ -1387,7 +1387,6 @@ Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
if (_vreg_flags.size_in_bits() == 0) {
BitMap2D temp(100, num_vreg_flags);
temp.clear();
_vreg_flags = temp;
}
_vreg_flags.at_put_grow(vreg_num, f, true);

View File

@ -562,14 +562,13 @@ void LinearScan::compute_local_live_sets() {
LIR_OpVisitState visitor;
BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());
local_interval_in_loop.clear();
// iterate all blocks
for (int i = 0; i < num_blocks; i++) {
BlockBegin* block = block_at(i);
ResourceBitMap live_gen(live_size); live_gen.clear();
ResourceBitMap live_kill(live_size); live_kill.clear();
ResourceBitMap live_gen(live_size);
ResourceBitMap live_kill(live_size);
if (block->is_set(BlockBegin::exception_entry_flag)) {
// Phi functions at the begin of an exception handler are
@ -715,8 +714,8 @@ void LinearScan::compute_local_live_sets() {
block->set_live_gen (live_gen);
block->set_live_kill(live_kill);
block->set_live_in (ResourceBitMap(live_size)); block->live_in().clear();
block->set_live_out (ResourceBitMap(live_size)); block->live_out().clear();
block->set_live_in (ResourceBitMap(live_size));
block->set_live_out (ResourceBitMap(live_size));
TRACE_LINEAR_SCAN(4, tty->print("live_gen B%d ", block->block_id()); print_bitmap(block->live_gen()));
TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
@ -741,7 +740,7 @@ void LinearScan::compute_global_live_sets() {
bool change_occurred;
bool change_occurred_in_block;
int iteration_count = 0;
ResourceBitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations
ResourceBitMap live_out(live_set_size()); // scratch set for calculations
// Perform a backward dataflow analysis to compute live_out and live_in for each block.
// The loop is executed until a fixpoint is reached (no changes in an iteration)
@ -827,7 +826,6 @@ void LinearScan::compute_global_live_sets() {
// check that the live_in set of the first block is empty
ResourceBitMap live_in_args(ir()->start()->live_in().size());
live_in_args.clear();
if (!ir()->start()->live_in().is_same(live_in_args)) {
#ifdef ASSERT
tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
@ -1774,8 +1772,8 @@ void LinearScan::resolve_data_flow() {
int num_blocks = block_count();
MoveResolver move_resolver(this);
ResourceBitMap block_completed(num_blocks); block_completed.clear();
ResourceBitMap already_resolved(num_blocks); already_resolved.clear();
ResourceBitMap block_completed(num_blocks);
ResourceBitMap already_resolved(num_blocks);
int i;
for (i = 0; i < num_blocks; i++) {
@ -3750,7 +3748,6 @@ void MoveResolver::verify_before_resolve() {
ResourceBitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
used_regs.clear();
if (!_multiple_reads_allowed) {
for (i = 0; i < _mapping_from.length(); i++) {
Interval* it = _mapping_from.at(i);
@ -6319,7 +6316,6 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
#ifdef ASSERT
ResourceBitMap return_converted(BlockBegin::number_of_blocks());
return_converted.clear();
#endif
for (int i = code->length() - 1; i >= 0; i--) {

View File

@ -53,7 +53,6 @@ class ValueSet: public CompilationResourceObj {
};
inline ValueSet::ValueSet() : _map(Instruction::number_of_instructions()) {
_map.clear();
}

View File

@ -449,7 +449,6 @@ ResourceBitMap ciMethod::live_local_oops_at_bci(int bci) {
OopMapCache::compute_one_oop_map(get_Method(), bci, &mask);
int mask_size = max_locals();
ResourceBitMap result(mask_size);
result.clear();
int i;
for (i = 0; i < mask_size ; i++ ) {
if (mask.is_oop(i)) result.set_bit(i);

View File

@ -4673,6 +4673,7 @@ void ClassFileParser::verify_legal_utf8(const unsigned char* buffer,
}
// Unqualified names may not contain the characters '.', ';', '[', or '/'.
// In class names, '/' separates unqualified names. This is verified in this function also.
// Method names also may not contain the characters '<' or '>', unless <init>
// or <clinit>. Note that method names may not be <init> or <clinit> in this
// method. Because these names have been checked as special cases before
@ -4698,8 +4699,16 @@ bool ClassFileParser::verify_unqualified_name(const char* name,
if (ch == ';' || ch == '[' ) {
return false; // do not permit '.', ';', or '['
}
if (type != ClassFileParser::LegalClass && ch == '/') {
return false; // do not permit '/' unless it's class name
if (ch == '/') {
// check for '//' or leading or trailing '/' which are not legal
// unqualified name must not be empty
if (type == ClassFileParser::LegalClass) {
if (p == name || p+1 >= name+length || *(p+1) == '/') {
return false;
}
} else {
return false; // do not permit '/' unless it's class name
}
}
if (type == ClassFileParser::LegalMethod && (ch == '<' || ch == '>')) {
return false; // do not permit '<' or '>' in method names
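A stand-alone restatement of just the new '/' rules for class names (a sketch for illustration; the real routine also checks ';', '[', and the method-name restrictions shown above):

#include <cstring>

// Illustrative only: the '/' rules for LegalClass names, outside the VM.
static bool legal_class_slashes(const char* name) {
  const size_t length = strlen(name);
  for (const char* p = name; p < name + length; p++) {
    if (*p == '/') {
      // Reject a leading '/', a trailing '/', and '//' (an empty unqualified name).
      if (p == name || p + 1 >= name + length || *(p + 1) == '/') {
        return false;
      }
    }
  }
  return true;
}
// legal_class_slashes("java/lang/String") -> true
// legal_class_slashes("/Foo")             -> false
// legal_class_slashes("a//b")             -> false
// For field and method names, any '/' at all is rejected (see above).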
@ -5347,15 +5356,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
if (!is_internal()) {
if (log_is_enabled(Info, class, load)) {
ResourceMark rm;
const char* module_name = NULL;
static const size_t modules_image_name_len = strlen(MODULES_IMAGE_NAME);
size_t stream_len = strlen(_stream->source());
// See if _stream->source() ends in "modules"
if (module_entry->is_named() && modules_image_name_len < stream_len &&
(strncmp(_stream->source() + stream_len - modules_image_name_len,
MODULES_IMAGE_NAME, modules_image_name_len) == 0)) {
module_name = module_entry->name()->as_C_string();
}
const char* module_name = (module_entry->name() == NULL) ? UNNAMED_MODULE : module_entry->name()->as_C_string();
if (log_is_enabled(Info, class, load)) {
ik->print_loading_log(LogLevel::Info, _loader_data, module_name, _stream);

View File

@ -181,26 +181,59 @@ bool ClassLoader::string_ends_with(const char* str, const char* str_to_find) {
}
// Used to obtain the package name from a fully qualified class name.
// It is the responsibility of the caller to establish ResourceMark.
const char* ClassLoader::package_from_name(const char* class_name) {
const char* last_slash = strrchr(class_name, '/');
// It is the responsibility of the caller to establish a ResourceMark.
const char* ClassLoader::package_from_name(const char* const class_name, bool* bad_class_name) {
if (class_name == NULL) {
if (bad_class_name != NULL) {
*bad_class_name = true;
}
return NULL;
}
if (bad_class_name != NULL) {
*bad_class_name = false;
}
const char* const last_slash = strrchr(class_name, '/');
if (last_slash == NULL) {
// No package name
return NULL;
}
int length = last_slash - class_name;
// A class name could have just the slash character in the name,
// resulting in a negative length.
char* class_name_ptr = (char*) class_name;
// Skip over '['s
if (*class_name_ptr == '[') {
do {
class_name_ptr++;
} while (*class_name_ptr == '[');
// Fully qualified class names should not contain an 'L'.
// Set bad_class_name to true to indicate that the package name
// could not be obtained due to an error condition.
// In this situation, is_same_class_package returns false.
if (*class_name_ptr == 'L') {
if (bad_class_name != NULL) {
*bad_class_name = true;
}
return NULL;
}
}
int length = last_slash - class_name_ptr;
// A class name could have just the slash character in the name.
if (length <= 0) {
// No package name
if (bad_class_name != NULL) {
*bad_class_name = true;
}
return NULL;
}
// drop name after last slash (including slash)
// Ex., "java/lang/String.class" => "java/lang"
char* pkg_name = NEW_RESOURCE_ARRAY(char, length + 1);
strncpy(pkg_name, class_name, length);
strncpy(pkg_name, class_name_ptr, length);
*(pkg_name+length) = '\0';
return (const char *)pkg_name;
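Worked examples for the routine above (illustrative, derived from the logic shown here):

// package_from_name("java/lang/String")     -> "java/lang"
// package_from_name("String")               -> NULL  (no package; *bad_class_name stays false)
// package_from_name("[[Ljava/lang/Object;") -> NULL, *bad_class_name = true
//                                              ('L' after the '['s marks a descriptor, not a class name)
// package_from_name("/Foo")                 -> NULL, *bad_class_name = true  (empty package part)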
@ -228,8 +261,9 @@ ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() {
ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// construct full path name
char path[JVM_MAXPATHLEN];
if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
char* path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
if (jio_snprintf(path, JVM_MAXPATHLEN, "%s%s%s", _dir, os::file_separator(), name) == -1) {
FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
return NULL;
}
// check if file exists
@ -256,6 +290,7 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(num_read);
}
FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
// Resource allocated
return new ClassFileStream(buffer,
st.st_size,
@ -264,6 +299,7 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
}
}
}
FREE_RESOURCE_ARRAY(char, path, JVM_MAXPATHLEN);
return NULL;
}
@ -344,9 +380,9 @@ u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TR
if (is_multi_ver) {
int n;
char entry_name[JVM_MAXPATHLEN];
char* entry_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
if (version > 0) {
n = jio_snprintf(entry_name, sizeof(entry_name), "META-INF/versions/%d/%s", version, name);
n = jio_snprintf(entry_name, JVM_MAXPATHLEN, "META-INF/versions/%d/%s", version, name);
entry_name[n] = '\0';
buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
if (buffer == NULL) {
@ -355,7 +391,7 @@ u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TR
}
if (buffer == NULL) {
for (int i = cur_ver; i >= base_version; i--) {
n = jio_snprintf(entry_name, sizeof(entry_name), "META-INF/versions/%d/%s", i, name);
n = jio_snprintf(entry_name, JVM_MAXPATHLEN, "META-INF/versions/%d/%s", i, name);
entry_name[n] = '\0';
buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
if (buffer != NULL) {
@ -363,6 +399,7 @@ u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TR
}
}
}
FREE_RESOURCE_ARRAY(char, entry_name, JVM_MAXPATHLEN);
}
}
return buffer;
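In outline, the versioned lookup above resolves a multi-release JAR entry as follows (a summary of the visible logic; steps outside this hunk are elided):

// 1. If a specific version was requested (version > 0), try
//    "META-INF/versions/<version>/<name>" first.
// 2. Otherwise, or on a miss, walk i from cur_ver down to base_version and try
//    "META-INF/versions/<i>/<name>", taking the first entry found.
// 3. The entry-name buffer is now a JVM_MAXPATHLEN resource array rather than a
//    stack array, released with FREE_RESOURCE_ARRAY before returning.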
@ -508,7 +545,8 @@ bool ctw_visitor(JImageFile* jimage,
const char* name, const char* extension, void* arg) {
if (strcmp(extension, "class") == 0) {
Thread* THREAD = Thread::current();
char path[JIMAGE_MAX_PATH];
ResourceMark rm(THREAD);
char* path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JIMAGE_MAX_PATH);
jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
return !HAS_PENDING_EXCEPTION;
@ -750,9 +788,10 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
JavaThread* thread = JavaThread::current();
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) {
ResourceMark rm(thread);
// Regular file, should be a zip or jimage file
// Canonicalized filename
char canonical_path[JVM_MAXPATHLEN];
char* canonical_path = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, JVM_MAXPATHLEN);
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
if (throw_exception) {
@ -777,14 +816,13 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
if (zip != NULL && error_msg == NULL) {
new_entry = new ClassPathZipEntry(zip, path, is_boot_append);
} else {
ResourceMark rm(thread);
char *msg;
if (error_msg == NULL) {
msg = NEW_RESOURCE_ARRAY(char, strlen(path) + 128); ;
msg = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, strlen(path) + 128); ;
jio_snprintf(msg, strlen(path) + 127, "error in opening JAR file %s", path);
} else {
int len = (int)(strlen(path) + strlen(error_msg) + 128);
msg = NEW_RESOURCE_ARRAY(char, len); ;
msg = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, len); ;
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
// Don't complain about bad jar files added via -Xbootclasspath/a:.
@ -1112,13 +1150,11 @@ bool ClassLoader::add_package(const char *fullq_class_name, s2 classpath_index,
assert(fullq_class_name != NULL, "just checking");
// Get package name from fully qualified class name.
const char *cp = strrchr(fullq_class_name, '/');
ResourceMark rm;
const char *cp = package_from_name(fullq_class_name);
if (cp != NULL) {
int len = cp - fullq_class_name;
PackageEntryTable* pkg_entry_tbl =
ClassLoaderData::the_null_class_loader_data()->packages();
TempNewSymbol pkg_symbol =
SymbolTable::new_symbol(fullq_class_name, len, CHECK_false);
PackageEntryTable* pkg_entry_tbl = ClassLoaderData::the_null_class_loader_data()->packages();
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(cp, CHECK_false);
PackageEntry* pkg_entry = pkg_entry_tbl->lookup_only(pkg_symbol);
if (pkg_entry != NULL) {
assert(classpath_index != -1, "Unexpected classpath_index");
@ -1226,11 +1262,9 @@ s2 ClassLoader::classloader_type(Symbol* class_name, ClassPathEntry* e, int clas
// jimage, it is determined by the class path entry.
jshort loader_type = ClassLoader::APP_LOADER;
if (e->is_jrt()) {
int length = 0;
const jbyte* pkg_string = InstanceKlass::package_from_name(class_name, length);
if (pkg_string != NULL) {
ResourceMark rm;
TempNewSymbol pkg_name = SymbolTable::new_symbol((const char*)pkg_string, length, THREAD);
ResourceMark rm;
TempNewSymbol pkg_name = InstanceKlass::package_from_name(class_name, CHECK_0);
if (pkg_name != NULL) {
const char* pkg_name_C_string = (const char*)(pkg_name->as_C_string());
ClassPathImageEntry* cpie = (ClassPathImageEntry*)e;
JImageFile* jimage = cpie->jimage();

View File

@ -444,7 +444,9 @@ class ClassLoader: AllStatic {
static bool string_ends_with(const char* str, const char* str_to_find);
// obtain package name from a fully qualified class name
static const char* package_from_name(const char* class_name);
// *bad_class_name is set to true if there's a problem with parsing class_name, to
// distinguish from a class_name with no package name, as both cases have a NULL return value
static const char* package_from_name(const char* const class_name, bool* bad_class_name = NULL);
static bool is_jrt(const char* name) { return string_ends_with(name, MODULES_IMAGE_NAME); }

View File

@ -54,12 +54,14 @@ public:
const s2 classpath_index,
instanceKlassHandle result, TRAPS) {
if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
s2 classloader_type = ClassLoader::classloader_type(
class_name, e, classpath_index, CHECK_(result));
result->set_shared_classpath_index(classpath_index);
result->set_class_loader_type(classloader_type);
}
#endif
return result;
} else {
return instanceKlassHandle(); // NULL

View File

@ -3245,6 +3245,15 @@ void java_lang_invoke_MemberName::set_vmindex(oop mname, intptr_t index) {
mname->address_field_put(_vmindex_offset, (address) index);
}
bool java_lang_invoke_MemberName::equals(oop mn1, oop mn2) {
if (mn1 == mn2) {
return true;
}
return (vmtarget(mn1) == vmtarget(mn2) && flags(mn1) == flags(mn2) &&
vmindex(mn1) == vmindex(mn2) &&
clazz(mn1) == clazz(mn2));
}
oop java_lang_invoke_LambdaForm::vmentry(oop lform) {
assert(is_instance(lform), "wrong type");
return lform->obj_field(_vmentry_offset);

View File

@ -1114,6 +1114,8 @@ class java_lang_invoke_MemberName: AllStatic {
static int flags_offset_in_bytes() { return _flags_offset; }
static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
static int vmindex_offset_in_bytes() { return _vmindex_offset; }
static bool equals(oop mt1, oop mt2);
};

View File

@ -201,7 +201,7 @@ ModuleEntryTable::~ModuleEntryTable() {
}
void ModuleEntryTable::create_unnamed_module(ClassLoaderData* loader_data) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
// Each ModuleEntryTable has exactly one unnamed module
if (loader_data->is_the_null_class_loader_data()) {
@ -227,7 +227,7 @@ void ModuleEntryTable::create_unnamed_module(ClassLoaderData* loader_data) {
ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle, Symbol* name,
Symbol* version, Symbol* location,
ClassLoaderData* loader_data) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
ModuleEntry* entry = (ModuleEntry*) NEW_C_HEAP_ARRAY(char, entry_size(), mtModule);
// Initialize everything BasicHashtable would
@ -258,7 +258,7 @@ ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle
}
void ModuleEntryTable::add_entry(int index, ModuleEntry* new_entry) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
Hashtable<Symbol*, mtModule>::add_entry(index, (HashtableEntry<Symbol*, mtModule>*)new_entry);
}
@ -268,7 +268,7 @@ ModuleEntry* ModuleEntryTable::locked_create_entry_or_null(Handle module_handle,
Symbol* module_location,
ClassLoaderData* loader_data) {
assert(module_name != NULL, "ModuleEntryTable locked_create_entry_or_null should never be called for unnamed module.");
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
// Check if module already exists.
if (lookup_only(module_name) != NULL) {
return NULL;
@ -309,7 +309,7 @@ void ModuleEntryTable::purge_all_module_reads() {
}
void ModuleEntryTable::finalize_javabase(Handle module_handle, Symbol* version, Symbol* location) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
ClassLoaderData* boot_loader_data = ClassLoaderData::the_null_class_loader_data();
ModuleEntryTable* module_table = boot_loader_data->modules();

View File

@ -568,8 +568,8 @@ void Modules::add_module_exports(jobject from_module, jstring package, jobject t
to_module_entry->is_named() ?
to_module_entry->name()->as_C_string() : UNNAMED_MODULE);
// Do nothing if modules are the same or if package is already exported unqualifiedly.
if (from_module_entry != to_module_entry && !package_entry->is_unqual_exported()) {
// Do nothing if modules are the same.
if (from_module_entry != to_module_entry) {
package_entry->set_exported(to_module_entry);
}
}

View File

@ -49,7 +49,7 @@ bool PackageEntry::is_qexported_to(ModuleEntry* m) const {
// Add a module to the package's qualified export list.
void PackageEntry::add_qexport(ModuleEntry* m) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
@ -157,7 +157,7 @@ PackageEntryTable::~PackageEntryTable() {
}
PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, ModuleEntry* module) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
PackageEntry* entry = (PackageEntry*) NEW_C_HEAP_ARRAY(char, entry_size(), mtModule);
// Initialize everything BasicHashtable would
@ -180,14 +180,14 @@ PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, Modu
}
void PackageEntryTable::add_entry(int index, PackageEntry* new_entry) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
Hashtable<Symbol*, mtModule>::add_entry(index, (HashtableEntry<Symbol*, mtModule>*)new_entry);
}
// Create package in loader's package entry table and return the entry.
// If entry already exists, return null. Assume Module lock was taken by caller.
PackageEntry* PackageEntryTable::locked_create_entry_or_null(Symbol* name, ModuleEntry* module) {
assert_locked_or_safepoint(Module_lock);
assert(Module_lock->owned_by_self(), "should have the Module_lock");
// Check if package already exists. Return NULL if it does.
if (lookup_only(name) != NULL) {
return NULL;
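These assertion changes tighten the locking contract: assert_locked_or_safepoint() is also satisfied at a safepoint, whereas the new form requires the calling thread itself to hold Module_lock. A sketch of the expected caller pattern (illustrative, not from the patch; package_table, pkg_name and module_entry are placeholder names):

// Illustrative caller, assuming the usual MutexLocker idiom:
{
  MutexLocker ml(Module_lock);   // this thread now owns Module_lock
  PackageEntry* p = package_table->locked_create_entry_or_null(pkg_name, module_entry);
  // ... use p while still holding the lock ...
}                                // lock released; the owned_by_self() asserts held inside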

View File

@ -40,11 +40,7 @@
// package is exported to.
//
// Packages can be exported in the following 3 ways:
// - not exported: the package has not been explicitly qualified to a
// particular module nor has it been specified to be
// unqualifiedly exported to all modules. If all states
// of exportedness are false, the package is considered
// not exported.
// - not exported: the package does not have qualified or unqualified exports.
// - qualified exports: the package has been explicitly qualified to at least
// one particular module or has been qualifiedly exported
// to all unnamed modules.
@ -125,6 +121,7 @@ public:
return _is_exported_unqualified;
}
void set_unqual_exported() {
assert(Module_lock->owned_by_self(), "should have the Module_lock");
_is_exported_unqualified = true;
_is_exported_allUnnamed = false;
_qualified_exports = NULL;

View File

@ -70,6 +70,7 @@
#include "services/threadService.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/macros.hpp"
#include "utilities/stringUtils.hpp"
#include "utilities/ticks.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
@ -1154,12 +1155,10 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
// It is illegal to define classes in the "java." package from
// JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
ResourceMark rm(THREAD);
char* name = parsed_name->as_C_string();
char* index = strrchr(name, '/');
*index = '\0'; // chop to just the package name
while ((index = strchr(name, '/')) != NULL) {
*index = '.'; // replace '/' with '.' in package name
}
TempNewSymbol pkg_name = InstanceKlass::package_from_name(parsed_name, CHECK_NULL);
assert(pkg_name != NULL, "Error in parsing package name starting with 'java/'");
char* name = pkg_name->as_C_string();
StringUtils::replace_no_expand(name, "/", ".");
const char* msg_text = "Prohibited package name: ";
size_t len = strlen(msg_text) + strlen(name) + 1;
char* message = NEW_RESOURCE_ARRAY(char, len);
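A worked example of the message construction above (illustrative): defining a class named "java/foo/Bar" from a non-boot loader.

//   pkg_name->as_C_string()                        -> "java/foo"
//   StringUtils::replace_no_expand(name, "/", ".") -> rewrites it in place to "java.foo"
//   resulting text                                 -> "Prohibited package name: java.foo"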
@ -1257,6 +1256,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
instanceKlassHandle ik,
Handle class_loader, TRAPS) {
ResourceMark rm;
int path_index = ik->shared_classpath_index();
SharedClassPathEntry* ent =
(SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
@ -1270,12 +1270,11 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
TempNewSymbol pkg_name = NULL;
PackageEntry* pkg_entry = NULL;
ModuleEntry* mod_entry = NULL;
int length = 0;
const char* pkg_string = NULL;
ClassLoaderData* loader_data = class_loader_data(class_loader);
const jbyte* pkg_string = InstanceKlass::package_from_name(class_name, length);
if (pkg_string != NULL) {
pkg_name = SymbolTable::new_symbol((const char*)pkg_string,
length, CHECK_(false));
pkg_name = InstanceKlass::package_from_name(class_name, CHECK_false);
if (pkg_name != NULL) {
pkg_string = pkg_name->as_C_string();
if (loader_data != NULL) {
pkg_entry = loader_data->packages()->lookup_only(pkg_name);
}
@ -1432,15 +1431,14 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
if (class_loader.is_null()) {
int length = 0;
ResourceMark rm;
PackageEntry* pkg_entry = NULL;
bool search_only_bootloader_append = false;
ClassLoaderData *loader_data = class_loader_data(class_loader);
// Find the package in the boot loader's package entry table.
const jbyte* pkg_string = InstanceKlass::package_from_name(class_name, length);
if (pkg_string != NULL) {
TempNewSymbol pkg_name = SymbolTable::new_symbol((const char*)pkg_string, length, CHECK_(nh));
TempNewSymbol pkg_name = InstanceKlass::package_from_name(class_name, CHECK_NULL);
if (pkg_name != NULL) {
pkg_entry = loader_data->packages()->lookup_only(pkg_name);
}
@ -1477,7 +1475,7 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
assert(!DumpSharedSpaces, "Archive dumped after module system initialization");
// After the module system has been initialized, check if the class'
// package is in a module defined to the boot loader.
if (pkg_string == NULL || pkg_entry == NULL || pkg_entry->in_unnamed_module()) {
if (pkg_name == NULL || pkg_entry == NULL || pkg_entry->in_unnamed_module()) {
// Class is either in the unnamed package, in a named package
// within a module not defined to the boot loader or in a
// named package within the unnamed module. In all cases,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,7 @@ public:
static bool is_shared_class_visible_for_classloader(
instanceKlassHandle ik,
Handle class_loader,
const jbyte* pkg_string,
const char* pkg_string,
Symbol* pkg_name,
PackageEntry* pkg_entry,
ModuleEntry* mod_entry,

View File

@ -137,11 +137,6 @@ MethodLiveness::MethodLiveness(Arena* arena, ciMethod* method)
_arena = arena;
_method = method;
_bit_map_size_bits = method->max_locals();
#ifdef COMPILER1
_bci_block_start.clear();
#endif
}
void MethodLiveness::compute_liveness() {
@ -587,14 +582,6 @@ MethodLiveness::BasicBlock::BasicBlock(MethodLiveness *analyzer, int start, int
new (analyzer->arena()) GrowableArray<MethodLiveness::BasicBlock*>(analyzer->arena(), 5, 0, NULL);
_exception_predecessors =
new (analyzer->arena()) GrowableArray<MethodLiveness::BasicBlock*>(analyzer->arena(), 5, 0, NULL);
_normal_exit.clear();
_exception_exit.clear();
_entry.clear();
// this initialization is not strictly necessary.
// _gen and _kill are cleared at the beginning of compute_gen_kill_range()
_gen.clear();
_kill.clear();
}
@ -1020,7 +1007,6 @@ MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* metho
_last_bci = bci;
}
answer.clear();
answer.set_union(_normal_exit);
answer.set_difference(_kill);
answer.set_union(_gen);

View File

@ -1605,6 +1605,9 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_inter_sweep_timer.reset();
_inter_sweep_timer.start();
// No longer a need to do a concurrent collection for Metaspace.
MetaspaceGC::set_should_concurrent_collect(false);
gch->post_full_gc_dump(gc_timer);
gc_timer->register_gc_end();

View File

@ -3474,7 +3474,8 @@ void G1CollectedHeap::restore_after_evac_failure() {
double remove_self_forwards_start = os::elapsedTime();
remove_self_forwarding_pointers();
_preserved_marks_set.restore(workers());
SharedRestorePreservedMarksTaskExecutor task_executor(workers());
_preserved_marks_set.restore(&task_executor);
g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
}

View File

@ -109,7 +109,7 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
// It dirties the cards that cover the block so that so that the post
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.

View File

@ -80,7 +80,7 @@ void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const
log_info(gc, stringdedup)(
"Concurrent String Deduplication "
G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS "), avg "
G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT "]",
G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT,
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psOldGen.hpp"
@ -237,8 +238,53 @@ void PSPromotionManager::register_preserved_marks(PreservedMarks* preserved_mark
_preserved_marks = preserved_marks;
}
class ParRestoreGCTask : public GCTask {
private:
const uint _id;
PreservedMarksSet* const _preserved_marks_set;
volatile size_t* const _total_size_addr;
public:
virtual char* name() {
return (char*) "preserved mark restoration task";
}
virtual void do_it(GCTaskManager* manager, uint which){
_preserved_marks_set->get(_id)->restore_and_increment(_total_size_addr);
}
ParRestoreGCTask(uint id,
PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr)
: _id(id),
_preserved_marks_set(preserved_marks_set),
_total_size_addr(total_size_addr) { }
};
class PSRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
private:
GCTaskManager* _gc_task_manager;
public:
PSRestorePreservedMarksTaskExecutor(GCTaskManager* gc_task_manager)
: _gc_task_manager(gc_task_manager) { }
void restore(PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr) {
// GCTask / GCTaskQueue are ResourceObjs
ResourceMark rm;
GCTaskQueue* q = GCTaskQueue::create();
for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
q->enqueue(new ParRestoreGCTask(i, preserved_marks_set, total_size_addr));
}
_gc_task_manager->execute_and_wait(q);
}
};
void PSPromotionManager::restore_preserved_marks() {
_preserved_marks_set->restore(PSScavenge::gc_task_manager());
PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
_preserved_marks_set->restore(&task_executor);
}
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {

View File

@ -739,7 +739,8 @@ void DefNewGeneration::remove_forwarding_pointers() {
eden()->object_iterate(&rspc);
from()->object_iterate(&rspc);
_preserved_marks_set.restore(GenCollectedHeap::heap()->workers());
SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
_preserved_marks_set.restore(&task_executor);
}
void DefNewGeneration::handle_promotion_failure(oop old) {

View File

@ -386,7 +386,7 @@ size_t CollectedHeap::max_tlab_size() const {
// initialized by this point, a fact that we assert when doing the
// card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
// G1 concurrent marking is in progress an SATB (pre-write-)barrier is
// G1 concurrent marking is in progress an SATB (pre-write-)barrier
// is used to remember the pre-value of any store. Initializing
// stores will not need this barrier, so we need not worry about
// compensating for the missing pre-barrier here. Turning now

View File

@ -28,9 +28,6 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/parallel/gcTaskManager.hpp"
#endif
void PreservedMarks::restore() {
while (!_stack.is_empty()) {
@ -40,6 +37,15 @@ void PreservedMarks::restore() {
assert_empty();
}
void PreservedMarks::restore_and_increment(volatile size_t* const total_size_addr) {
const size_t stack_size = size();
restore();
// Only do the atomic add if the size is > 0.
if (stack_size > 0) {
Atomic::add(stack_size, total_size_addr);
}
}
#ifndef PRODUCT
void PreservedMarks::assert_empty() {
assert(_stack.is_empty(), "stack expected to be empty, size = "SIZE_FORMAT,
@ -82,13 +88,7 @@ public:
virtual void work(uint worker_id) {
uint task_id = 0;
while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) {
PreservedMarks* const preserved_marks = _preserved_marks_set->get(task_id);
const size_t size = preserved_marks->size();
preserved_marks->restore();
// Only do the atomic add if the size is > 0.
if (size > 0) {
Atomic::add(size, _total_size_addr);
}
_preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
}
_sub_tasks.all_tasks_completed();
}
@ -104,53 +104,6 @@ public:
}
};
void PreservedMarksSet::restore_internal(WorkGang* workers,
volatile size_t* total_size_addr) {
assert(workers != NULL, "pre-condition");
ParRestoreTask task(workers->active_workers(), this, total_size_addr);
workers->run_task(&task);
}
#if INCLUDE_ALL_GCS
class ParRestoreGCTask : public GCTask {
private:
const uint _id;
PreservedMarksSet* const _preserved_marks_set;
volatile size_t* const _total_size_addr;
public:
virtual char* name() { return (char*) "preserved mark restoration task"; }
virtual void do_it(GCTaskManager* manager, uint which) {
PreservedMarks* const preserved_marks = _preserved_marks_set->get(_id);
const size_t size = preserved_marks->size();
preserved_marks->restore();
// Only do the atomic add if the size is > 0.
if (size > 0) {
Atomic::add(size, _total_size_addr);
}
}
ParRestoreGCTask(uint id,
PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr)
: _id(id),
_preserved_marks_set(preserved_marks_set),
_total_size_addr(total_size_addr) { }
};
void PreservedMarksSet::restore_internal(GCTaskManager* gc_task_manager,
volatile size_t* total_size_addr) {
// GCTask / GCTaskQueue are ResourceObjs
ResourceMark rm;
GCTaskQueue* q = GCTaskQueue::create();
for (uint i = 0; i < num(); i += 1) {
q->enqueue(new ParRestoreGCTask(i, this, total_size_addr));
}
gc_task_manager->execute_and_wait(q);
}
#endif
void PreservedMarksSet::reclaim() {
assert_empty();
@ -176,3 +129,16 @@ void PreservedMarksSet::assert_empty() {
}
}
#endif // ndef PRODUCT
void SharedRestorePreservedMarksTaskExecutor::restore(PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr) {
if (_workers == NULL) {
for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
*total_size_addr += preserved_marks_set->get(i)->size();
preserved_marks_set->get(i)->restore();
}
} else {
ParRestoreTask task(_workers->active_workers(), preserved_marks_set, total_size_addr);
_workers->run_task(&task);
}
}
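Taken together, the new executor hierarchy replaces the two restore_internal() overloads; a condensed sketch of how the collectors now drive restoration (illustrative, mirroring the call sites shown earlier in this change):

//   // Serial / CMS / G1 / DefNew collectors hand their WorkGang to the shared executor:
//   SharedRestorePreservedMarksTaskExecutor task_executor(workers());
//   _preserved_marks_set.restore(&task_executor);
//
//   // Parallel Scavenge wraps its GCTaskManager instead:
//   PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
//   _preserved_marks_set->restore(&task_executor);
//
// With a NULL WorkGang the shared executor restores each stack serially; otherwise
// it runs ParRestoreTask across the gang's active workers.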

View File

@ -30,7 +30,7 @@
#include "oops/oop.hpp"
#include "utilities/stack.hpp"
class GCTaskManager;
class PreservedMarksSet;
class WorkGang;
class PreservedMarks VALUE_OBJ_CLASS_SPEC {
@ -61,6 +61,7 @@ public:
// reclaim the memory taken up by the stack segments.
void restore();
void restore_and_increment(volatile size_t* const _total_size_addr);
inline static void init_forwarded_mark(oop obj);
// Assert the stack is empty and has no cached segments.
@ -75,6 +76,24 @@ public:
virtual void do_object(oop obj);
};
class RestorePreservedMarksTaskExecutor {
public:
void virtual restore(PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr) = 0;
};
class SharedRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
private:
WorkGang* _workers;
public:
SharedRestorePreservedMarksTaskExecutor(WorkGang* workers) : _workers(workers) { }
void restore(PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr);
};
class PreservedMarksSet : public CHeapObj<mtGC> {
private:
// true -> _stacks will be allocated in the C heap
@ -91,13 +110,6 @@ private:
// or == NULL if they have not.
Padded<PreservedMarks>* _stacks;
// Internal version of restore() that uses a WorkGang for parallelism.
void restore_internal(WorkGang* workers, volatile size_t* total_size_addr);
// Internal version of restore() that uses a GCTaskManager for parallelism.
void restore_internal(GCTaskManager* gc_task_manager,
volatile size_t* total_size_addr);
public:
uint num() const { return _num; }
@ -111,14 +123,11 @@ public:
// Allocate stack array.
void init(uint num);
// Itrerate over all stacks, restore all presered marks, and reclaim
// the memory taken up by the stack segments. If the executor is
// NULL, restoration will be done serially. If the executor is not
// NULL, restoration could be done in parallel (when it makes
// sense). Supported executors: WorkGang (Serial, CMS, G1),
// GCTaskManager (PS).
template <class E>
inline void restore(E* executor);
// Iterate over all stacks, restore all preserved marks, and reclaim
// the memory taken up by the stack segments.
// Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, CMS, G1),
// PSRestorePreservedMarksTaskExecutor (PS).
inline void restore(RestorePreservedMarksTaskExecutor* executor);
// Reclaim stack array.
void reclaim();

View File

@ -49,8 +49,7 @@ inline void PreservedMarks::init_forwarded_mark(oop obj) {
obj->init_mark();
}
template <class E>
inline void PreservedMarksSet::restore(E* executor) {
inline void PreservedMarksSet::restore(RestorePreservedMarksTaskExecutor* executor) {
volatile size_t total_size = 0;
#ifdef ASSERT
@ -61,17 +60,7 @@ inline void PreservedMarksSet::restore(E* executor) {
}
#endif // def ASSERT
if (executor == NULL) {
for (uint i = 0; i < _num; i += 1) {
total_size += get(i)->size();
get(i)->restore();
}
} else {
// Right now, if the executor is not NULL we do the work in
// parallel. In the future we might want to do the restoration
// serially, if there's only a small number of marks per stack.
restore_internal(executor, &total_size);
}
executor->restore(this, &total_size);
assert_empty();
assert(total_size == total_size_before,

View File

@ -108,8 +108,9 @@ public:
// do nothing. tlabs must be inited by initialize() calls
}
static const size_t min_size() { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
static const size_t max_size() { assert(_max_size != 0, "max_size not set up"); return _max_size; }
static size_t min_size() { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
static size_t max_size() { assert(_max_size != 0, "max_size not set up"); return _max_size; }
static size_t max_size_in_bytes() { return max_size() * BytesPerWord; }
static void set_max_size(size_t max_size) { _max_size = max_size; }
HeapWord* start() const { return _start; }

View File

@ -44,11 +44,6 @@ void AbstractWorkGang::initialize_workers() {
vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GangWorker array.");
}
_active_workers = ParallelGCThreads;
if (UseDynamicNumberOfGCThreads && !FLAG_IS_CMDLINE(ParallelGCThreads)) {
_active_workers = 1U;
}
add_workers(true);
}
@ -60,6 +55,10 @@ AbstractGangWorker* AbstractWorkGang::install_worker(uint worker_id) {
}
void AbstractWorkGang::add_workers(bool initializing) {
add_workers(_active_workers, initializing);
}
void AbstractWorkGang::add_workers(uint active_workers, bool initializing) {
os::ThreadType worker_type;
if (are_ConcurrentGC_threads()) {
@ -69,7 +68,7 @@ void AbstractWorkGang::add_workers(bool initializing) {
}
_created_workers = WorkerManager::add_workers(this,
_active_workers,
active_workers,
_total_workers,
_created_workers,
worker_type,
@ -268,10 +267,11 @@ void WorkGang::run_task(AbstractGangTask* task) {
}
void WorkGang::run_task(AbstractGangTask* task, uint num_workers) {
guarantee(num_workers <= active_workers(),
"Trying to execute task %s with %u workers which is more than the amount of active workers %u.",
task->name(), num_workers, active_workers());
guarantee(num_workers <= total_workers(),
"Trying to execute task %s with %u workers which is more than the amount of total workers %u.",
task->name(), num_workers, total_workers());
guarantee(num_workers > 0, "Trying to execute task %s with zero workers", task->name());
add_workers(num_workers, false);
_dispatcher->coordinator_execute_on_workers(task, num_workers);
}
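The relaxed guarantee means a caller may now request any worker count up to total_workers(); the gang lazily creates missing threads via add_workers(num_workers, false) before dispatching. A usage sketch (illustrative; 'desired_workers' is a placeholder value):

// Illustrative only, not from the patch.
uint n = MIN2(desired_workers, workers->total_workers());
workers->run_task(&task, n);   // missing workers are created on demand before dispatch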

View File

@ -170,6 +170,9 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
// Add GC workers as needed.
void add_workers(bool initializing);
// Add GC workers as needed to reach the specified number of workers.
void add_workers(uint active_workers, bool initializing);
// Return the Ith worker.
AbstractGangWorker* worker(uint i) const;
@ -214,7 +217,8 @@ public:
virtual void run_task(AbstractGangTask* task);
// Run a task with the given number of workers, returns
// when the task is done. The number of workers must be at most the number of
// active workers.
// active workers. Additional workers may be created if an insufficient
// number currently exists.
void run_task(AbstractGangTask* task, uint num_workers);
protected:

View File

@ -40,31 +40,6 @@
#define JVMCI_ERROR_OK(...) JVMCI_ERROR_(JVMCIEnv::ok, __VA_ARGS__)
#define CHECK_OK CHECK_(JVMCIEnv::ok)
class ParseClosure : public StackObj {
int _lineNo;
char* _filename;
bool _abort;
protected:
void abort() { _abort = true; }
void warn_and_abort(const char* message) {
warn(message);
abort();
}
void warn(const char* message) {
warning("Error at line %d while parsing %s: %s", _lineNo, _filename == NULL ? "?" : _filename, message);
}
public:
ParseClosure() : _lineNo(0), _filename(NULL), _abort(false) {}
void parse_line(char* line) {
_lineNo++;
do_line(line);
}
virtual void do_line(char* line) = 0;
int lineNo() { return _lineNo; }
bool is_aborted() { return _abort; }
void set_filename(char* path) {_filename = path; _lineNo = 0;}
};
class JVMCIRuntime: public AllStatic {
public:
// Constants describing whether JVMCI wants to be able to adjust the compilation

View File

@ -484,13 +484,13 @@ void LogConfiguration::print_command_line_help(FILE* out) {
" -Xlog:gc::uptime,tid\n"
"\t Log messages tagged with 'gc' tag using 'info' level to output 'stdout', using 'uptime' and 'tid' decorations.\n\n"
" -Xlog:gc*=info,rt*=off\n"
"\t Log messages tagged with at least 'gc' using 'info' level, but turn off logging of messages tagged with 'rt'.\n"
"\t (Messages tagged with both 'gc' and 'rt' will not be logged.)\n\n"
" -Xlog:gc*=info,safepoint*=off\n"
"\t Log messages tagged with at least 'gc' using 'info' level, but turn off logging of messages tagged with 'safepoint'.\n"
"\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)\n\n"
" -Xlog:disable -Xlog:rt=trace:rttrace.txt\n"
" -Xlog:disable -Xlog:safepoint=trace:safepointtrace.txt\n"
"\t Turn off all logging, including warnings and errors,\n"
"\t and then enable messages tagged with 'rt' using 'trace' level to file 'rttrace.txt'.\n");
"\t and then enable messages tagged with 'safepoint' using 'trace' level to file 'safepointtrace.txt'.\n");
}
void LogConfiguration::rotate_all_outputs() {

View File

@ -2933,7 +2933,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// Don't use large pages for the class space.
bool large_pages = false;
#ifndef AARCH64
#if !(defined(AARCH64) || defined(AIX))
ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
@ -2945,18 +2945,25 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// bits.
if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
metaspace_rs = ReservedSpace(compressed_class_space_size(),
_reserve_alignment,
large_pages,
requested_addr);
_reserve_alignment,
large_pages,
requested_addr);
}
if (! metaspace_rs.is_reserved()) {
// Try to align metaspace so that we can decode a compressed klass
// with a single MOVK instruction. We can do this iff the
// Aarch64: Try to align metaspace so that we can decode a compressed
// klass with a single MOVK instruction. We can do this iff the
// compressed class base is a multiple of 4G.
for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
// Aix: Search for a place where we can find memory. If we need to load
// the base, 4G alignment is helpful, too.
size_t increment = AARCH64_ONLY(4*)G;
for (char *a = (char*)align_ptr_up(requested_addr, increment);
a < (char*)(1024*G);
a += 4*G) {
a += increment) {
if (a == (char *)(32*G)) {
// Go faster from here on. Zero-based is no longer possible.
increment = 4*G;
}
#if INCLUDE_CDS
if (UseSharedSpaces

View File

@ -2182,39 +2182,21 @@ const char* InstanceKlass::signature_name() const {
return dest;
}
const jbyte* InstanceKlass::package_from_name(const Symbol* name, int& length) {
ResourceMark rm;
length = 0;
// Used to obtain the package name from a fully qualified class name.
Symbol* InstanceKlass::package_from_name(const Symbol* name, TRAPS) {
if (name == NULL) {
return NULL;
} else {
const jbyte* base_name = name->base();
const jbyte* last_slash = UTF8::strrchr(base_name, name->utf8_length(), '/');
if (last_slash == NULL) {
// No package name
if (name->utf8_length() <= 0) {
return NULL;
} else {
// Skip over '['s
if (*base_name == '[') {
do {
base_name++;
} while (*base_name == '[');
if (*base_name != 'L') {
// Fully qualified class names should not contain a 'L'.
// Set length to -1 to indicate that the package name
// could not be obtained due to an error condition.
// In this situtation, is_same_class_package returns false.
length = -1;
return NULL;
}
}
// Found the package name, look it up in the symbol table.
length = last_slash - base_name;
assert(length > 0, "Bad length for package name");
return base_name;
}
ResourceMark rm;
const char* package_name = ClassLoader::package_from_name((const char*) name->as_C_string());
if (package_name == NULL) {
return NULL;
}
Symbol* pkg_name = SymbolTable::new_symbol(package_name, THREAD);
return pkg_name;
}
}
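The rewrite above drops the hand-rolled UTF8 scan and delegates to ClassLoader::package_from_name() plus a symbol-table lookup. A minimal sketch of the underlying idea, splitting a fully qualified internal class name at its last '/' (the helper name and plain char* handling are illustrative only, not the HotSpot API):

```cpp
#include <cstring>
#include <cstdio>

// Return a pointer to the start of the package portion and its length,
// or nullptr when the class is in the unnamed package.
static const char* package_of(const char* class_name, size_t* len) {
  const char* last_slash = std::strrchr(class_name, '/');
  if (last_slash == nullptr) {
    *len = 0;
    return nullptr;
  }
  *len = (size_t)(last_slash - class_name);
  return class_name;
}

int main() {
  size_t len = 0;
  const char* p = package_of("java/lang/String", &len);
  if (p != nullptr) {
    std::printf("%.*s\n", (int)len, p);   // prints "java/lang"
  }
  return 0;
}
```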
@ -2230,12 +2212,9 @@ ModuleEntry* InstanceKlass::module() const {
}
void InstanceKlass::set_package(ClassLoaderData* loader_data, TRAPS) {
int length = 0;
const jbyte* base_name = package_from_name(name(), length);
if (base_name != NULL && loader_data != NULL) {
TempNewSymbol pkg_name = SymbolTable::new_symbol((const char*)base_name, length, CHECK);
TempNewSymbol pkg_name = package_from_name(name(), CHECK);
if (pkg_name != NULL && loader_data != NULL) {
// Find in class loader's package entry table.
_package_entry = loader_data->packages()->lookup_only(pkg_name);
@ -2331,20 +2310,18 @@ bool InstanceKlass::is_same_class_package(oop class_loader1, const Symbol* class
if (class_loader1 != class_loader2) {
return false;
} else if (class_name1 == class_name2) {
return true; // skip painful bytewise comparison
return true;
} else {
ResourceMark rm;
// The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
// for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
// Otherwise, we just compare jbyte values between the strings.
int length1 = 0;
int length2 = 0;
const jbyte *name1 = package_from_name(class_name1, length1);
const jbyte *name2 = package_from_name(class_name2, length2);
bool bad_class_name = false;
const char* name1 = ClassLoader::package_from_name((const char*) class_name1->as_C_string(), &bad_class_name);
if (bad_class_name) {
return false;
}
if ((length1 < 0) || (length2 < 0)) {
// error occurred parsing package name.
const char* name2 = ClassLoader::package_from_name((const char*) class_name2->as_C_string(), &bad_class_name);
if (bad_class_name) {
return false;
}
@ -2354,13 +2331,13 @@ bool InstanceKlass::is_same_class_package(oop class_loader1, const Symbol* class
return name1 == name2;
}
// Check that package part is identical
return UTF8::equal(name1, length1, name2, length2);
// Check that package is identical
return (strcmp(name1, name2) == 0);
}
}
// Returns true iff super_method can be overridden by a method in targetclassname
// See JSL 3rd edition 8.4.6.1
// See JLS 3rd edition 8.4.6.1
// Assumes name-signature match
// "this" is InstanceKlass of super_method which must exist
// note that the InstanceKlass of the method in the targetclassname has not always been created yet
@ -2716,7 +2693,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
return NULL;
}
bool InstanceKlass::add_member_name(Handle mem_name) {
oop InstanceKlass::add_member_name(Handle mem_name, bool intern) {
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
MutexLocker ml(MemberNameTable_lock);
DEBUG_ONLY(NoSafepointVerifier nsv);
@ -2726,7 +2703,7 @@ bool InstanceKlass::add_member_name(Handle mem_name) {
// is called!
Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name());
if (method->is_obsolete()) {
return false;
return NULL;
} else if (method->is_old()) {
// Replace method with redefined version
java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum()));
@ -2735,8 +2712,11 @@ bool InstanceKlass::add_member_name(Handle mem_name) {
if (_member_names == NULL) {
_member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
}
_member_names->add_member_name(mem_name_wref);
return true;
if (intern) {
return _member_names->find_or_add_member_name(mem_name_wref);
} else {
return _member_names->add_member_name(mem_name_wref);
}
}
// -----------------------------------------------------------------------------------------------------
@ -3027,7 +3007,11 @@ void InstanceKlass::print_loading_log(LogLevel::type type,
if (cfs != NULL) {
if (cfs->source() != NULL) {
if (module_name != NULL) {
log->print(" source: jrt:/%s", module_name);
if (ClassLoader::is_jrt(cfs->source())) {
log->print(" source: jrt:/%s", module_name);
} else {
log->print(" source: %s", cfs->source());
}
} else {
log->print(" source: %s", cfs->source());
}

View File

@ -1108,7 +1108,7 @@ public:
// Naming
const char* signature_name() const;
static const jbyte* package_from_name(const Symbol* name, int& length);
static Symbol* package_from_name(const Symbol* name, TRAPS);
// GC specific object visitors
//
@ -1298,7 +1298,7 @@ public:
// JSR-292 support
MemberNameTable* member_names() { return _member_names; }
void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
bool add_member_name(Handle member_name);
oop add_member_name(Handle member_name, bool intern);
public:
// JVMTI support

View File

@ -246,7 +246,7 @@ class Method : public Metadata {
int code_size() const { return constMethod()->code_size(); }
// method size in words
int method_size() const { return sizeof(Method)/wordSize + is_native() ? 2 : 0; }
int method_size() const { return sizeof(Method)/wordSize + ( is_native() ? 2 : 0 ); }
// constant pool for Klass* holding this method
ConstantPool* constants() const { return constMethod()->constants(); }
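The one-line change above is an operator-precedence fix: without parentheses, '+' binds tighter than the conditional operator, so the old expression computed '(sizeof(Method)/wordSize + is_native()) ? 2 : 0'. A tiny standalone demonstration of the difference (variable names are made up):

```cpp
#include <cstdio>

int main() {
  int words = 10;
  bool is_native = false;
  int unparenthesized = words + is_native ? 2 : 0;    // parses as (words + is_native) ? 2 : 0 -> 2
  int parenthesized   = words + (is_native ? 2 : 0);  // intended meaning -> 10
  std::printf("%d vs %d\n", unparenthesized, parenthesized);
  return 0;
}
```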

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -204,7 +204,8 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c
}
if (!finish_transform(phase, can_reshape, ctl, mem)) {
return NULL;
// Return NodeSentinel to indicate that the transform failed
return NodeSentinel;
}
return mem;
@ -222,6 +223,7 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
Node* dest = in(ArrayCopyNode::Dest);
const Type* src_type = phase->type(src);
const TypeAryPtr* ary_src = src_type->isa_aryptr();
assert(ary_src != NULL, "should be an array copy/clone");
if (is_arraycopy() || is_copyofrange() || is_copyof()) {
const Type* dest_type = phase->type(dest);
@ -520,7 +522,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* mem = try_clone_instance(phase, can_reshape, count);
if (mem != NULL) {
return mem;
return (mem == NodeSentinel) ? NULL : mem;
}
Node* adr_src = NULL;
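Returning NodeSentinel lets try_clone_instance() distinguish "transform attempted but must bail out" from "not applicable", and Ideal() then maps the sentinel back to NULL, as the hunk above shows. A generic sketch of that three-way return idiom, under invented names (kSentinel is a stand-in, not HotSpot's NodeSentinel):

```cpp
#include <cstdio>

struct Node {};

static Node sentinel_storage;
static Node* const kSentinel = &sentinel_storage;    // stands in for NodeSentinel

// nullptr   -> the transform did not apply; the caller may try other transforms.
// kSentinel -> the transform applied but failed; the caller should give up.
// otherwise -> the transformed result.
static Node* try_transform(bool applicable, bool finished) {
  static Node result;
  if (!applicable) return nullptr;
  if (!finished)   return kSentinel;
  return &result;
}

int main() {
  Node* mem = try_transform(true, false);
  if (mem != nullptr) {
    // Mirrors "return (mem == NodeSentinel) ? NULL : mem;" in the hunk above.
    Node* out = (mem == kSentinel) ? nullptr : mem;
    std::printf("%s\n", out == nullptr ? "failed transform" : "transformed");
  } else {
    std::printf("not applicable\n");
  }
  return 0;
}
```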
@ -627,31 +629,37 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase) {
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, ArrayCopyNode*& ac) {
if (n->is_Proj()) {
n = n->in(0);
if (n->is_Call() && n->as_Call()->may_modify(t_oop, phase)) {
if (n->isa_ArrayCopy() != NULL) {
ac = n->as_ArrayCopy();
}
return true;
}
}
return false;
}
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase) {
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {
Node* mem = mb->in(TypeFunc::Memory);
if (mem->is_MergeMem()) {
Node* n = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
if (may_modify_helper(t_oop, n, phase)) {
if (may_modify_helper(t_oop, n, phase, ac)) {
return true;
} else if (n->is_Phi()) {
for (uint i = 1; i < n->req(); i++) {
if (n->in(i) != NULL) {
if (may_modify_helper(t_oop, n->in(i), phase)) {
if (may_modify_helper(t_oop, n->in(i), phase, ac)) {
return true;
}
}
}
} else if (n->Opcode() == Op_StoreCM) {
// Ignore card mark stores
return may_modify_helper(t_oop, n->in(MemNode::Memory), phase, ac);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,7 @@ private:
BasicType copy_type, const Type* value_type, int count);
bool finish_transform(PhaseGVN *phase, bool can_reshape,
Node* ctl, Node *mem);
static bool may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase);
static bool may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, ArrayCopyNode*& ac);
public:
@ -162,7 +162,7 @@ public:
bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }
static bool may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase);
static bool may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac);
bool modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify);
#ifndef PRODUCT

View File

@ -4306,8 +4306,15 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
} __ end_if();
} __ end_if();
} else {
// Object.clone() instrinsic uses this path.
g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
// The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
// We don't need a barrier here if the destination is a newly allocated object
// in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
// are set to 'g1_young_gen' (see G1SATBCardTableModRefBS::verify_g1_young_region()).
assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
__ if_then(card_val, BoolTest::ne, young_card); {
g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
} __ end_if();
}
// Final sync IdealKit and GraphKit.
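The added path above emits a card-value check so that Object.clone() without ReduceInitialCardMarks does not dirty cards that are still marked as young. A loose sketch of that conditional card mark, with made-up card values rather than G1's real constants:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative card values; the real byte values live in the G1 card table code.
enum : uint8_t { kYoungCard = 0x02, kDirtyCard = 0x00 };

static void post_barrier(uint8_t* card) {
  if (*card != kYoungCard) {   // cards covering newly allocated (young) objects are left alone
    *card = kDirtyCard;
  }
}

int main() {
  uint8_t young = kYoungCard;
  uint8_t other = 0xFF;        // a card outside the young generation, illustrative only
  post_barrier(&young);
  post_barrier(&other);
  std::printf("young=0x%02x other=0x%02x\n", (unsigned)young, (unsigned)other);
  return 0;
}
```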

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
@ -263,41 +264,58 @@ void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
// checks if the store done to a different from the value's region.
// And replace Cmp with #0 (false) to collapse G1 post barrier.
Node* xorx = p2x->find_out_with(Op_XorX);
assert(xorx != NULL, "missing G1 post barrier");
Node* shift = xorx->unique_out();
Node* cmpx = shift->unique_out();
assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
"missing region check in G1 post barrier");
_igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
if (xorx != NULL) {
Node* shift = xorx->unique_out();
Node* cmpx = shift->unique_out();
assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
"missing region check in G1 post barrier");
_igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
// Remove G1 pre barrier.
// Remove G1 pre barrier.
// Search "if (marking != 0)" check and set it to "false".
// There is no G1 pre barrier if previous stored value is NULL
// (for example, after initialization).
if (this_region->is_Region() && this_region->req() == 3) {
int ind = 1;
if (!this_region->in(ind)->is_IfFalse()) {
ind = 2;
}
if (this_region->in(ind)->is_IfFalse()) {
Node* bol = this_region->in(ind)->in(0)->in(1);
assert(bol->is_Bool(), "");
cmpx = bol->in(1);
if (bol->as_Bool()->_test._test == BoolTest::ne &&
cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
cmpx->in(1)->is_Load()) {
Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active());
if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
_igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
// Search "if (marking != 0)" check and set it to "false".
// There is no G1 pre barrier if previous stored value is NULL
// (for example, after initialization).
if (this_region->is_Region() && this_region->req() == 3) {
int ind = 1;
if (!this_region->in(ind)->is_IfFalse()) {
ind = 2;
}
if (this_region->in(ind)->is_IfFalse()) {
Node* bol = this_region->in(ind)->in(0)->in(1);
assert(bol->is_Bool(), "");
cmpx = bol->in(1);
if (bol->as_Bool()->_test._test == BoolTest::ne &&
cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
cmpx->in(1)->is_Load()) {
Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active());
if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
_igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
}
}
}
}
} else {
assert(!GraphKit::use_ReduceInitialCardMarks(), "can only happen with card marking");
// This is a G1 post barrier emitted by the Object.clone() intrinsic.
// Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
// is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
Node* shift = p2x->find_out_with(Op_URShiftX);
assert(shift != NULL, "missing G1 post barrier");
Node* addp = shift->unique_out();
Node* load = addp->find_out_with(Op_LoadB);
assert(load != NULL, "missing G1 post barrier");
Node* cmpx = load->unique_out();
assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
"missing card value check in G1 post barrier");
_igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
// There is no G1 pre barrier in this case
}
// Now CastP2X can be removed since it is used only on dead path
// which currently still alive until igvn optimize it.
@ -326,17 +344,15 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
CallNode *call = in->as_Call();
if (call->may_modify(tinst, phase)) {
assert(call->is_ArrayCopy(), "ArrayCopy is the only call node that doesn't make allocation escape");
if (call->as_ArrayCopy()->modifies(offset, offset, phase, false)) {
return in;
}
}
mem = in->in(TypeFunc::Memory);
} else if (in->is_MemBar()) {
if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase)) {
assert(in->in(0)->is_Proj() && in->in(0)->in(0)->is_ArrayCopy(), "should be arraycopy");
ArrayCopyNode* ac = in->in(0)->in(0)->as_ArrayCopy();
assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone");
ArrayCopyNode* ac = NULL;
if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) {
assert(ac != NULL && ac->is_clonebasic(), "Only basic clone is a non escaping clone");
return ac;
}
mem = in->in(TypeFunc::Memory);

View File

@ -160,7 +160,8 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oo
}
}
} else if (proj_in->is_MemBar()) {
if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase)) {
ArrayCopyNode* ac = NULL;
if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) {
break;
}
result = proj_in->in(TypeFunc::Memory);
@ -657,7 +658,8 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
continue; // (a) advance through independent call memory
}
} else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase)) {
ArrayCopyNode* ac = NULL;
if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) {
break;
}
mem = mem->in(0)->in(TypeFunc::Memory);

View File

@ -166,14 +166,11 @@ class Parse : public GraphKit {
int _all_successors; // Include exception paths also.
Block** _successors;
// Use init_node/init_graph to initialize Blocks.
// Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
Block() : _live_locals() { ShouldNotReachHere(); }
public:
// Set up the block data structure itself.
void init_node(Parse* outer, int po);
Block(Parse* outer, int rpo);
// Set up the block's relations to other blocks.
void init_graph(Parse* outer);

View File

@ -1235,29 +1235,33 @@ void Parse::init_blocks() {
// Create the blocks.
_block_count = flow()->block_count();
_blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);
int rpo;
// Initialize the structs.
for (rpo = 0; rpo < block_count(); rpo++) {
for (int rpo = 0; rpo < block_count(); rpo++) {
Block* block = rpo_at(rpo);
block->init_node(this, rpo);
new(block) Block(this, rpo);
}
// Collect predecessor and successor information.
for (rpo = 0; rpo < block_count(); rpo++) {
for (int rpo = 0; rpo < block_count(); rpo++) {
Block* block = rpo_at(rpo);
block->init_graph(this);
}
}
//-------------------------------init_node-------------------------------------
void Parse::Block::init_node(Parse* outer, int rpo) {
Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
_flow = outer->flow()->rpo_at(rpo);
_pred_count = 0;
_preds_parsed = 0;
_count = 0;
_is_parsed = false;
_is_handler = false;
_has_merged_backedge = false;
_start_map = NULL;
_num_successors = 0;
_all_successors = 0;
_successors = NULL;
assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
assert(_live_locals.size() == 0, "sanity");
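Replacing init_node() with a real constructor means the parser blocks are now built with placement new over the resource-allocated array, as shown in the init_blocks() hunk above. A small self-contained sketch of that pattern (malloc stands in for NEW_RESOURCE_ARRAY, and the Block type here is invented):

```cpp
#include <new>
#include <cstdlib>
#include <cstdio>

struct Block {
  int rpo;
  explicit Block(int r) : rpo(r) {}
};

int main() {
  const int count = 4;
  void* raw = std::malloc(sizeof(Block) * count);   // stands in for NEW_RESOURCE_ARRAY
  Block* blocks = static_cast<Block*>(raw);
  for (int i = 0; i < count; i++) {
    new (&blocks[i]) Block(i);                      // construct in place, as in init_blocks()
  }
  std::printf("block 2 rpo=%d\n", blocks[2].rpo);
  for (int i = 0; i < count; i++) {
    blocks[i].~Block();                             // trivially destructible here, shown for form
  }
  std::free(raw);
  return 0;
}
```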

View File

@ -695,7 +695,7 @@ JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle))
// This can safepoint and redefine method, so need both new_obj and method
// in a handle, for two different reasons. new_obj can move, method can be
// deleted if nothing is using it on the stack.
m->method_holder()->add_member_name(new_obj());
m->method_holder()->add_member_name(new_obj(), false);
}
}

View File

@ -319,15 +319,7 @@ jvmtiError
JvmtiEnv::GetObjectSize(jobject object, jlong* size_ptr) {
oop mirror = JNIHandles::resolve_external_guard(object);
NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
if (mirror->klass() == SystemDictionary::Class_klass() &&
!java_lang_Class::is_primitive(mirror)) {
Klass* k = java_lang_Class::as_Klass(mirror);
assert(k != NULL, "class for non-primitive mirror must exist");
*size_ptr = (jlong)k->size() * wordSize;
} else {
*size_ptr = (jlong)mirror->size() * wordSize;
}
*size_ptr = (jlong)mirror->size() * wordSize;
return JVMTI_ERROR_NONE;
} /* end GetObjectSize */

View File

@ -178,7 +178,7 @@ oop MethodHandles::init_MemberName(Handle mname, Handle target) {
return NULL;
}
oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, bool intern) {
assert(info.resolved_appendix().is_null(), "only normal methods here");
methodHandle m = info.resolved_method();
assert(m.not_null(), "null method handle");
@ -279,13 +279,7 @@ oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
// If relevant, the vtable or itable value is stored as vmindex.
// This is done eagerly, since it is readily available without
// constructing any new objects.
// TO DO: maybe intern mname_oop
if (m->method_holder()->add_member_name(mname)) {
return mname();
} else {
// Redefinition caused this to fail. Return NULL (and an exception?)
return NULL;
}
return m->method_holder()->add_member_name(mname, intern);
}
oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
@ -975,7 +969,9 @@ int MethodHandles::find_MemberNames(KlassHandle k,
if (!java_lang_invoke_MemberName::is_instance(result()))
return -99; // caller bug!
CallInfo info(m);
oop saved = MethodHandles::init_method_MemberName(result, info);
// Since this is going through the methods to create MemberNames, don't search
// for matching methods already in the table
oop saved = MethodHandles::init_method_MemberName(result, info, /*intern*/false);
if (saved != result())
results->obj_at_put(rfill-1, saved); // show saved instance to user
} else if (++overflow >= overflow_limit) {
@ -1056,9 +1052,34 @@ MemberNameTable::~MemberNameTable() {
}
}
void MemberNameTable::add_member_name(jweak mem_name_wref) {
oop MemberNameTable::add_member_name(jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
this->push(mem_name_wref);
return JNIHandles::resolve(mem_name_wref);
}
oop MemberNameTable::find_or_add_member_name(jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
oop new_mem_name = JNIHandles::resolve(mem_name_wref);
// Find matching member name in the list.
// This is linear because these are short lists.
int len = this->length();
int new_index = len;
for (int idx = 0; idx < len; idx++) {
oop mname = JNIHandles::resolve(this->at(idx));
if (mname == NULL) {
new_index = idx;
continue;
}
if (java_lang_invoke_MemberName::equals(new_mem_name, mname)) {
JNIHandles::destroy_weak_global(mem_name_wref);
return mname;
}
}
// Not found, push the new one, or reuse empty slot
this->at_put_grow(new_index, mem_name_wref);
return new_mem_name;
}
#if INCLUDE_JVMTI

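find_or_add_member_name() above interns MemberNames by a linear scan, reusing cleared slots and dropping the duplicate weak handle when an equal entry already exists. A simplified sketch of the same find-or-add shape, using plain heap strings in place of JNI weak globals (all names here are illustrative):

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Entries may be null (a previously cleared slot); equal entries are deduplicated
// and the rejected candidate is released, mirroring destroy_weak_global() above.
static std::string* find_or_add(std::vector<std::string*>& table, std::string* candidate) {
  size_t new_index = table.size();
  for (size_t i = 0; i < table.size(); i++) {
    if (table[i] == nullptr) { new_index = i; continue; }          // remember a reusable slot
    if (*table[i] == *candidate) { delete candidate; return table[i]; }
  }
  if (new_index == table.size()) {
    table.push_back(candidate);
  } else {
    table[new_index] = candidate;                                  // reuse the empty slot
  }
  return candidate;
}

int main() {
  std::vector<std::string*> table;
  std::string* a = find_or_add(table, new std::string("pkg/Foo.bar()V"));
  std::string* b = find_or_add(table, new std::string("pkg/Foo.bar()V"));
  std::printf("%s\n", a == b ? "interned" : "distinct");
  for (std::string* e : table) delete e;
  return 0;
}
```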
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ class MethodHandles: AllStatic {
static Handle new_MemberName(TRAPS); // must be followed by init_MemberName
static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
static oop init_method_MemberName(Handle mname_h, CallInfo& info);
static oop init_method_MemberName(Handle mname_h, CallInfo& info, bool intern = true);
static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
int mflags, KlassHandle caller,
@ -253,7 +253,8 @@ class MemberNameTable : public GrowableArray<jweak> {
public:
MemberNameTable(int methods_cnt);
~MemberNameTable();
void add_member_name(jweak mem_name_ref);
oop add_member_name(jweak mem_name_ref);
oop find_or_add_member_name(jweak mem_name_ref);
#if INCLUDE_JVMTI
// RedefineClasses() API support:

View File

@ -356,19 +356,6 @@ UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe,
return JNIHandles::make_local(env, v);
} UNSAFE_END
UNSAFE_ENTRY(jclass, Unsafe_GetJavaMirror(JNIEnv *env, jobject unsafe, jlong metaspace_klass)) {
Klass* klass = (Klass*) (address) metaspace_klass;
return (jclass) JNIHandles::make_local(klass->java_mirror());
} UNSAFE_END
UNSAFE_ENTRY(jlong, Unsafe_GetKlassPointer(JNIEnv *env, jobject unsafe, jobject obj)) {
oop o = JNIHandles::resolve(obj);
jlong klass = (jlong) (address) o->klass();
return klass;
} UNSAFE_END
#ifndef SUPPORTS_NATIVE_CX8
// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
@ -1152,8 +1139,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "putObjectVolatile",CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_PutObjectVolatile)},
{CC "getUncompressedObject", CC "(" ADR ")" OBJ, FN_PTR(Unsafe_GetUncompressedObject)},
{CC "getJavaMirror", CC "(" ADR ")" CLS, FN_PTR(Unsafe_GetJavaMirror)},
{CC "getKlassPointer", CC "(" OBJ ")" ADR, FN_PTR(Unsafe_GetKlassPointer)},
DECLARE_GETPUTOOP(Boolean, Z),
DECLARE_GETPUTOOP(Byte, B),

View File

@ -1433,6 +1433,10 @@ WB_ENTRY(jlong, WB_MetaspaceCapacityUntilGC(JNIEnv* env, jobject wb))
return (jlong) MetaspaceGC::capacity_until_GC();
WB_END
WB_ENTRY(jboolean, WB_MetaspaceShouldConcurrentCollect(JNIEnv* env, jobject wb))
return MetaspaceGC::should_concurrent_collect();
WB_END
WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
@ -1813,6 +1817,7 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/ClassLoader;JJ)V", (void*)&WB_FreeMetaspace },
{CC"incMetaspaceCapacityUntilGC", CC"(J)J", (void*)&WB_IncMetaspaceCapacityUntilGC },
{CC"metaspaceCapacityUntilGC", CC"()J", (void*)&WB_MetaspaceCapacityUntilGC },
{CC"metaspaceShouldConcurrentCollect", CC"()Z", (void*)&WB_MetaspaceShouldConcurrentCollect },
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
{CC"getNMethod0", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
(void*)&WB_GetNMethod },

View File

@ -498,6 +498,19 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
}
#endif
if (thread->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
assert(thread->has_pending_exception(), "should have thrown OOME");
thread->set_exception_oop(thread->pending_exception());
thread->clear_pending_exception();
exec_mode = Unpack_exception;
}
#if INCLUDE_JVMCI
if (thread->frames_to_pop_failed_realloc() > 0) {
thread->set_pending_monitorenter(false);
}
#endif
UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
caller_adjustment * BytesPerWord,
caller_was_method_handle ? 0 : callee_parameters,

View File

@ -446,6 +446,14 @@ void before_exit(JavaThread* thread) {
os::infinite_sleep();
}
EventThreadEnd event;
if (event.should_commit()) {
event.set_thread(THREAD_TRACE_ID(thread));
event.commit();
}
TRACE_VM_EXIT();
// Stop the WatcherThread. We do this before disenrolling various
// PeriodicTasks to reduce the likelihood of races.
if (PeriodicTask::num_tasks() > 0) {
@ -484,13 +492,6 @@ void before_exit(JavaThread* thread) {
JvmtiExport::post_thread_end(thread);
}
EventThreadEnd event;
if (event.should_commit()) {
event.set_thread(THREAD_TRACE_ID(thread));
event.commit();
}
// Always call even when there are not JVMTI environments yet, since environments
// may be attached late and JVMTI must track phases of VM execution
JvmtiExport::post_vm_death();

View File

@ -274,7 +274,7 @@ void mutex_init() {
def(JfrMsg_lock , Monitor, leaf, true, Monitor::_safepoint_check_always);
def(JfrBuffer_lock , Mutex, leaf, true, Monitor::_safepoint_check_never);
def(JfrThreadGroups_lock , Mutex, leaf, true, Monitor::_safepoint_check_always);
def(JfrStream_lock , Mutex, nonleaf, true, Monitor::_safepoint_check_never);
def(JfrStream_lock , Mutex, leaf+1, true, Monitor::_safepoint_check_never); // ensure it ranks lower than 'safepoint'
def(JfrStacktrace_lock , Mutex, special, true, Monitor::_safepoint_check_sometimes);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,10 +73,7 @@ GrowableArray<FilteredField*> *FilteredFieldsMap::_filtered_fields =
void FilteredFieldsMap::initialize() {
int offset;
offset = java_lang_Throwable::get_backtrace_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::Throwable_klass(), offset));
offset = reflect_ConstantPool::oop_offset();
int offset = reflect_ConstantPool::oop_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset));
offset = reflect_UnsafeStaticFieldAccessorImpl::base_offset();
_filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset));

View File

@ -3405,6 +3405,8 @@ static void call_initPhase1(TRAPS) {
//
// After phase 2, The VM will begin search classes from -Xbootclasspath/a.
static void call_initPhase2(TRAPS) {
TraceTime timer("Phase2 initialization", TRACETIME_LOG(Info, modules, startuptime));
Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
instanceKlassHandle klass (THREAD, k);

View File

@ -71,9 +71,12 @@ inline jlong Thread::cooked_allocated_bytes() {
jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
if (UseTLAB) {
size_t used_bytes = tlab().used_bytes();
if ((ssize_t)used_bytes > 0) {
// More-or-less valid tlab. The load_acquire above should ensure
// that the result of the add is <= the instantaneous value.
if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
// Comparing used_bytes with the maximum allowed size will ensure
// that we don't add the used bytes from a semi-initialized TLAB
// ending up with incorrect values. There is still a race between
// incrementing _allocated_bytes and clearing the TLAB, that might
// cause double counting in rare cases.
return allocated_bytes + used_bytes;
}
}

View File

@ -171,6 +171,8 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
int exec_mode) {
JavaThread* thread = (JavaThread*) Thread::current();
bool realloc_failure_exception = thread->frames_to_pop_failed_realloc() > 0;
// Look at bci and decide on bcp and continuation pc
address bcp;
// C++ interpreter doesn't need a pc since it will figure out what to do when it
@ -204,10 +206,12 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
//
// For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
// in which case bcp should point to the monitorenter since it is within the exception's range.
//
// For realloc failure exception we just pop frames, skip the guarantee.
assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
assert(thread->deopt_compiled_method() != NULL, "compiled method should be known");
guarantee(!(thread->deopt_compiled_method()->is_compiled_by_c2() &&
guarantee(realloc_failure_exception || !(thread->deopt_compiled_method()->is_compiled_by_c2() &&
*bcp == Bytecodes::_monitorenter &&
exec_mode == Deoptimization::Unpack_exception),
"shouldn't get exception during monitorenter");
@ -237,12 +241,17 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
// Deoptimization::fetch_unroll_info_helper
popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
}
} else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
} else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
// Force early return from top frame after deoptimization
#ifndef CC_INTERP
pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#endif
} else {
if (realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
state->clr_earlyret_pending();
state->set_earlyret_oop(NULL);
state->clr_earlyret_value();
}
// Possibly override the previous pc computation of the top (youngest) frame
switch (exec_mode) {
case Deoptimization::Unpack_deopt:

Some files were not shown because too many files have changed in this diff