commit 8c7da4b546
Author: Andrew Haley
Date:   2016-07-14 15:18:15 +01:00

1419 changed files with 29527 additions and 24181 deletions

@@ -367,3 +367,4 @@ caf97b37ebec84288c112d21d3a60cb628cba1e8 jdk-9+119
7693aa00e131493ceb42b93305e2f014c9922a3b jdk-9+122
d53037a90c441cb528dc41c30827985de0e67c62 jdk-9+123
2a5697a98620c4f40e4a1a71478464399b8878de jdk-9+124
3aa52182b3ad7c5b3a61cf05a59dd07e4c5884e5 jdk-9+125

@@ -367,3 +367,4 @@ cae471d3b87783e0a3deea658e1e1c84b2485b6c jdk-9+121
346be2df0f5b31d423807f53a719d1b9a67f3354 jdk-9+122
405d811c0d7b9b48ff718ae6c240b732f098c028 jdk-9+123
f80c841ae2545eaf9acd2724bccc305d98cefbe2 jdk-9+124
9aa7d40f3a453f51e47f4c1b19eff5740a74a9f8 jdk-9+125

@@ -359,25 +359,32 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Starting amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs_big,[$JAVA])
BOOTCYCLE_JVM_ARGS_BIG=-Xms64M
# Maximum amount of heap memory and stack size.
JVM_HEAP_LIMIT_32="1024"
# Running a 64 bit JVM allows for and requires a bigger heap
JVM_HEAP_LIMIT_64="1600"
STACK_SIZE_32=768
STACK_SIZE_64=1536
JVM_HEAP_LIMIT_GLOBAL=`expr $MEMORY_SIZE / 2`
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_32"; then
JVM_HEAP_LIMIT_32=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_64"; then
JVM_HEAP_LIMIT_64=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "512"; then
JVM_HEAP_LIMIT_32=512
JVM_HEAP_LIMIT_64=512
fi
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BOOT_JDK_BITS" = "x32"; then
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
STACK_SIZE=$STACK_SIZE_32
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_32
else
# Running a 64 bit JVM allows for and requires a bigger heap
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
STACK_SIZE=$STACK_SIZE_64
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_64
fi
ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])
@@ -387,6 +394,19 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
AC_SUBST(JAVA_FLAGS_BIG)
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_32
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_32
else
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_64
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_64
fi
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -Xmx${BOOTCYCLE_MAX_HEAP}M"
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -XX:ThreadStackSize=$BOOTCYCLE_STACK_SIZE"
AC_MSG_CHECKING([flags for bootcycle boot jdk java command for big workloads])
AC_MSG_RESULT([$BOOTCYCLE_JVM_ARGS_BIG])
AC_SUBST(BOOTCYCLE_JVM_ARGS_BIG)
# By default, the main javac compilations use big
JAVA_FLAGS_JAVAC="$JAVA_FLAGS_BIG"
AC_SUBST(JAVA_FLAGS_JAVAC)

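The hunk above replaces hard-coded per-branch -Xmx values with shared JVM_HEAP_LIMIT_32/64 variables so the bootcycle JDK can reuse them. Restated as a hedged sketch (illustrative Java, not part of the patch): the limit is half of physical memory, clamped between a 512 MB floor and a per-bitness cap.

```java
// Minimal sketch of the boot JDK heap-limit selection above.
// All names are illustrative; the real logic is shell/m4 arithmetic.
public class BootJdkHeapLimit {
    static final int FLOOR_MB = 512;

    // limit = clamp(memoryMb / 2, 512, per-bitness cap)
    static int heapLimitMb(int memoryMb, boolean is64Bit) {
        int cap = is64Bit ? 1600 : 1024;  // JVM_HEAP_LIMIT_64 / JVM_HEAP_LIMIT_32
        int half = memoryMb / 2;          // JVM_HEAP_LIMIT_GLOBAL
        return Math.max(FLOOR_MB, Math.min(cap, half));
    }

    public static void main(String[] args) {
        System.out.println(heapLimitMb(4096, true));   // 1600 (64-bit cap)
        System.out.println(heapLimitMb(4096, false));  // 1024 (32-bit cap)
        System.out.println(heapLimitMb(800, true));    // 512  (floor)
    }
}
```
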
@@ -64,5 +64,7 @@ SJAVAC_SERVER_JAVA_CMD:=$(JAVA_CMD)
# When building a 32bit target, make sure the sjavac server flags are compatible
# with a 32bit JVM.
ifeq ($(OPENJDK_TARGET_CPU_BITS), 32)
SJAVAC_SERVER_JAVA_FLAGS:= -Xms256M -Xmx1500M
SJAVAC_SERVER_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
endif
# The bootcycle JVM arguments may differ from the original boot jdk.
JAVA_FLAGS_BIG := @BOOTCYCLE_JVM_ARGS_BIG@

@@ -367,6 +367,9 @@ AC_DEFUN_ONCE([BPERF_SETUP_PRECOMPILED_HEADERS],
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
AC_MSG_RESULT([no, does not work with Solaris Studio])
USE_PRECOMPILED_HEADER=0
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
AC_MSG_RESULT([no, does not work with xlc])
USE_PRECOMPILED_HEADER=0
else
AC_MSG_RESULT([yes])
fi

@@ -593,9 +593,9 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
fi
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST_JVM="-O3"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_HIGHEST_JVM="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HIGHEST="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HI="-O3 -qinline -qinlglue"
C_O_FLAG_NORM="-O2"
C_O_FLAG_DEBUG="-qnoopt"
# FIXME: Value below not verified.
@@ -911,8 +911,8 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
elif test "x$OPENJDK_$1_OS" = xaix; then
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
$2JVM_CFLAGS="[$]$2JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_$1_OS" = xbsd; then
$2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"

@@ -644,6 +644,7 @@ SJAVAC_SERVER_JAVA
JAVA_TOOL_FLAGS_SMALL
JAVA_FLAGS_SMALL
JAVA_FLAGS_JAVAC
BOOTCYCLE_JVM_ARGS_BIG
JAVA_FLAGS_BIG
JAVA_FLAGS
TEST_JOBS
@@ -5094,7 +5095,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1467039751
DATE_WHEN_GENERATED=1467223237
###############################################################################
#
@@ -49625,9 +49626,9 @@ $as_echo "$supports" >&6; }
fi
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST_JVM="-O3"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_HIGHEST_JVM="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HIGHEST="-O3 -qhot=level=1 -qinline -qinlglue"
C_O_FLAG_HI="-O3 -qinline -qinlglue"
C_O_FLAG_NORM="-O2"
C_O_FLAG_DEBUG="-qnoopt"
# FIXME: Value below not verified.
@@ -50634,8 +50635,8 @@ $as_echo "$supports" >&6; }
elif test "x$OPENJDK_TARGET_OS" = xaix; then
JVM_CFLAGS="$JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
JVM_CFLAGS="$JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
JVM_CFLAGS="$JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_TARGET_OS" = xbsd; then
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
@@ -51439,8 +51440,8 @@ $as_echo "$supports" >&6; }
elif test "x$OPENJDK_BUILD_OS" = xaix; then
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -DAIX"
# We may need '-qminimaltoc' or '-qpic=large -bbigtoc' if the TOC overflows.
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -qtune=balanced -qhot=level=1 -qinline \
-qinlglue -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -qtune=balanced \
-qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-qlanglvl=noredefmac -qnortti -qnoeh -qignerrno"
elif test "x$OPENJDK_BUILD_OS" = xbsd; then
OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
@@ -53468,7 +53469,7 @@ $as_echo "yes, forced" >&6; }
$as_echo "no, forced" >&6; }
BUILD_GTEST="false"
elif test "x$enable_hotspot_gtest" = "x"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue" && test "x$OPENJDK_TARGET_OS" != "xaix"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
BUILD_GTEST="true"
@@ -64612,12 +64613,16 @@ fi
if test "$OPENJDK_TARGET_OS" = "solaris"; then
if test "$OPENJDK_TARGET_OS" = "solaris" && test "x$BUILD_GTEST" = "xtrue"; then
# Find the root of the Solaris Studio installation from the compiler path
SOLARIS_STUDIO_DIR="$(dirname $CC)/.."
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4$OPENJDK_TARGET_CPU_ISADIR/libstlport.so.1"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for libstlport.so.1" >&5
$as_echo_n "checking for libstlport.so.1... " >&6; }
if ! test -f "$STLPORT_LIB" && test "x$OPENJDK_TARGET_CPU_ISADIR" = "x/sparcv9"; then
# SS12u3 has libstlport under 'stlport4/v9' instead of 'stlport4/sparcv9'
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4/v9/libstlport.so.1"
fi
if test -f "$STLPORT_LIB"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, $STLPORT_LIB" >&5
$as_echo "yes, $STLPORT_LIB" >&6; }
@@ -65118,25 +65123,32 @@ $as_echo_n "checking flags for boot jdk java command for big workloads... " >&6;
JVM_ARG_OK=false
fi
BOOTCYCLE_JVM_ARGS_BIG=-Xms64M
# Maximum amount of heap memory and stack size.
JVM_HEAP_LIMIT_32="1024"
# Running a 64 bit JVM allows for and requires a bigger heap
JVM_HEAP_LIMIT_64="1600"
STACK_SIZE_32=768
STACK_SIZE_64=1536
JVM_HEAP_LIMIT_GLOBAL=`expr $MEMORY_SIZE / 2`
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_32"; then
JVM_HEAP_LIMIT_32=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_64"; then
JVM_HEAP_LIMIT_64=$JVM_HEAP_LIMIT_GLOBAL
fi
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "512"; then
JVM_HEAP_LIMIT_32=512
JVM_HEAP_LIMIT_64=512
fi
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BOOT_JDK_BITS" = "x32"; then
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
STACK_SIZE=$STACK_SIZE_32
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_32
else
# Running a 64 bit JVM allows for and requires a bigger heap
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
STACK_SIZE=$STACK_SIZE_64
JVM_MAX_HEAP=$JVM_HEAP_LIMIT_64
fi
$ECHO "Check if jvm arg is ok: -Xmx${JVM_MAX_HEAP}M" >&5
@@ -65175,6 +65187,21 @@ $as_echo "$boot_jdk_jvmargs_big" >&6; }
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_32
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_32
else
BOOTCYCLE_MAX_HEAP=$JVM_HEAP_LIMIT_64
BOOTCYCLE_STACK_SIZE=$STACK_SIZE_64
fi
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -Xmx${BOOTCYCLE_MAX_HEAP}M"
BOOTCYCLE_JVM_ARGS_BIG="$BOOTCYCLE_JVM_ARGS_BIG -XX:ThreadStackSize=$BOOTCYCLE_STACK_SIZE"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for bootcycle boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for bootcycle boot jdk java command for big workloads... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $BOOTCYCLE_JVM_ARGS_BIG" >&5
$as_echo "$BOOTCYCLE_JVM_ARGS_BIG" >&6; }
# By default, the main javac compilations use big
JAVA_FLAGS_JAVAC="$JAVA_FLAGS_BIG"
@@ -66132,6 +66159,10 @@ $as_echo "no, does not work effectively with icecc" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, does not work with Solaris Studio" >&5
$as_echo "no, does not work with Solaris Studio" >&6; }
USE_PRECOMPILED_HEADER=0
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no, does not work with xlc" >&5
$as_echo "no, does not work with xlc" >&6; }
USE_PRECOMPILED_HEADER=0
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }

@@ -333,7 +333,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_GTEST],
AC_MSG_RESULT([no, forced])
BUILD_GTEST="false"
elif test "x$enable_hotspot_gtest" = "x"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue"; then
if test "x$GTEST_DIR_EXISTS" = "xtrue" && test "x$OPENJDK_TARGET_OS" != "xaix"; then
AC_MSG_RESULT([yes])
BUILD_GTEST="true"
else

@@ -197,11 +197,15 @@ AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
################################################################################
AC_DEFUN_ONCE([LIB_SETUP_SOLARIS_STLPORT],
[
if test "$OPENJDK_TARGET_OS" = "solaris"; then
if test "$OPENJDK_TARGET_OS" = "solaris" && test "x$BUILD_GTEST" = "xtrue"; then
# Find the root of the Solaris Studio installation from the compiler path
SOLARIS_STUDIO_DIR="$(dirname $CC)/.."
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4$OPENJDK_TARGET_CPU_ISADIR/libstlport.so.1"
AC_MSG_CHECKING([for libstlport.so.1])
if ! test -f "$STLPORT_LIB" && test "x$OPENJDK_TARGET_CPU_ISADIR" = "x/sparcv9"; then
# SS12u3 has libstlport under 'stlport4/v9' instead of 'stlport4/sparcv9'
STLPORT_LIB="$SOLARIS_STUDIO_DIR/lib/stlport4/v9/libstlport.so.1"
fi
if test -f "$STLPORT_LIB"; then
AC_MSG_RESULT([yes, $STLPORT_LIB])
BASIC_FIXUP_PATH([STLPORT_LIB])

@@ -578,7 +578,7 @@ SJAVAC_SERVER_JAVA=@FIXPATH@ @FIXPATH_DETACH_FLAG@ $(SJAVAC_SERVER_JAVA_CMD) \
JAVAC_FLAGS?=@JAVAC_FLAGS@
BUILD_JAVA_FLAGS:=-Xms64M -Xmx1100M
BUILD_JAVA_FLAGS := @BOOTCYCLE_JVM_ARGS_BIG@
BUILD_JAVA=@FIXPATH@ $(BUILD_JDK)/bin/java $(BUILD_JAVA_FLAGS)
# Use ?= as this can be overridden from bootcycle-spec.gmk

@@ -102,10 +102,13 @@ diff_text() {
# Ignore date strings in class files.
# Anonymous lambda classes get randomly assigned counters in their names.
if test "x$SUFFIX" = "xclass"; then
if [ "$NAME" = "module-info.class" ] || [ "$NAME" = "SystemModules.class" ]
then
# The SystemModules.class and module-info.class have several issues
# with random ordering of elements in HashSets.
if [ "$NAME" = "SystemModules.class" ]; then
# The SystemModules.class is not comparable. The way it is generated is
# too random. It can even be of different size for no apparent reason.
TMP=""
elif [ "$NAME" = "module-info.class" ]; then
# The module-info.class have several issues with random ordering of
# elements in HashSets.
MODULES_CLASS_FILTER="$SED \
-e 's/,$//' \
-e 's/;$//' \
@@ -369,6 +372,14 @@ compare_general_files() {
$CAT $OTHER_DIR/$f | eval "$HTML_FILTER" > $OTHER_FILE &
$CAT $THIS_DIR/$f | eval "$HTML_FILTER" > $THIS_FILE &
wait
elif [ "$f" = "./lib/classlist" ]; then
# The classlist files may have some lines in random order
OTHER_FILE=$WORK_DIR/$f.other
THIS_FILE=$WORK_DIR/$f.this
$MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE)
$RM $OTHER_FILE $THIS_FILE
$CAT $OTHER_DIR/$f | $SORT > $OTHER_FILE
$CAT $THIS_DIR/$f | $SORT > $THIS_FILE
else
OTHER_FILE=$OTHER_DIR/$f
THIS_FILE=$THIS_DIR/$f
@@ -651,7 +662,7 @@ compare_bin_file() {
OTHER_DIZ_FILE=${OTHER_FILE_BASE}.diz
else
# Some files, jli.dll, appears twice in the image but only one of
# thme has a diz file next to it.
# them has a diz file next to it.
OTHER_DIZ_FILE="$($FIND $OTHER_DIR -name $DIZ_NAME | $SED 1q)"
if [ ! -f "$OTHER_DIZ_FILE" ]; then
# As a last resort, look for diz file in the whole build output
@@ -1335,6 +1346,24 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
OTHER_JDK="$OTHER/images/jdk"
OTHER_JRE="$OTHER/images/jre"
echo "Selecting jdk images for compare"
elif [ -d "$(ls -d $THIS/licensee-src/build/*/images/jdk)" ] \
&& [ -d "$(ls -d $OTHER/licensee-src/build/*/images/jdk)" ]
then
echo "Selecting licensee images for compare"
# Simply override the THIS and OTHER dir with the build dir from
# the nested licensee source build for the rest of the script
# execution.
OLD_THIS="$THIS"
OLD_OTHER="$OTHER"
THIS="$(ls -d $THIS/licensee-src/build/*)"
OTHER="$(ls -d $OTHER/licensee-src/build/*)"
THIS_JDK="$THIS/images/jdk"
THIS_JRE="$THIS/images/jre"
OTHER_JDK="$OTHER/images/jdk"
OTHER_JRE="$OTHER/images/jre"
# Rewrite the path to tools that are used from the build
JIMAGE="$(echo "$JIMAGE" | $SED "s|$OLD_THIS|$THIS|g")"
JAVAP="$(echo "$JAVAP" | $SED "s|$OLD_THIS|$THIS|g")"
else
echo "No common images found."
exit 1

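The ./lib/classlist special case above sorts both copies before diffing because that file may legitimately list the same lines in a different order between two builds. A hedged Java sketch of the same idea (class and file names are illustrative, not part of compare.sh):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class OrderInsensitiveCompare {
    // Equivalent in spirit to: cat file | sort, on both sides, then diff.
    static boolean sameLinesIgnoringOrder(Path a, Path b) throws IOException {
        List<String> la = new ArrayList<>(Files.readAllLines(a));
        List<String> lb = new ArrayList<>(Files.readAllLines(b));
        Collections.sort(la);
        Collections.sort(lb);
        return la.equals(lb);
    }

    public static void main(String[] args) throws IOException {
        System.out.println(sameLinesIgnoringOrder(Paths.get(args[0]), Paths.get(args[1])));
    }
}
```
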
@@ -254,7 +254,7 @@ var getJibProfilesProfiles = function (input, common) {
build_cpu: "x64",
dependencies: concat(common.dependencies, "devkit"),
configure_args: concat(common.configure_args, common.configure_args_32bit,
"--with-jvm-variants=minimal,client,server", "--with-zlib=system"),
"--with-jvm-variants=minimal,server", "--with-zlib=system"),
default_make_targets: common.default_make_targets
},
@@ -295,8 +295,7 @@ var getJibProfilesProfiles = function (input, common) {
target_cpu: "x86",
build_cpu: "x64",
dependencies: concat(common.dependencies, "devkit", "freetype"),
configure_args: concat(common.configure_args,
"--with-jvm-variants=client,server", common.configure_args_32bit),
configure_args: concat(common.configure_args, common.configure_args_32bit),
default_make_targets: common.default_make_targets
}
};

@@ -367,3 +367,4 @@ daf533920b1266603b5cbdab31908d2a931c5361 jdk-9+119
a39131aafc51a6fd8836e6ebe1b04458702ce7d6 jdk-9+122
e33a34cc551907617d8129c4faaf1a5a7e61d21c jdk-9+123
45121d5afb9d5bfadab75378572ad96832e0809e jdk-9+124
1d48e67d1b91eb9f72e49e69a4021edb85e357fc jdk-9+125

@@ -527,3 +527,5 @@ b64432bae5271735fd53300b2005b713e98ef411 jdk-9+114
af6b4ad908e732d23021f12e8322b204433d5cf6 jdk-9+122
75f81e1fecfb444f34f357295fe06af60e2762d9 jdk-9+123
479631362b4930be985245ea063d87d821a472eb jdk-9+124
bb640b49741af3f57f9994129934c46fc173219f jdk-9+125
adc8c84b7cf8c540d920182f78a2bc982366432a jdk-9+126

@@ -45,7 +45,8 @@ ifeq ($(call check-jvm-feature, dtrace), true)
$(DTRACE_GENSRC_DIR)/%.h: $(DTRACE_SOURCE_DIR)/%.d
$(call LogInfo, Generating dtrace header file $(@F))
$(call MakeDir, $(@D) $(DTRACE_SUPPORT_DIR))
$(call ExecuteWithLog, $(DTRACE_SUPPORT_DIR)/$(@F).d, $(CC) -E $(DTRACE_CPP_FLAGS) $< > $(DTRACE_SUPPORT_DIR)/$(@F).d)
$(call ExecuteWithLog, $(DTRACE_SUPPORT_DIR)/$(@F).d, \
( $(CC) -E $(DTRACE_CPP_FLAGS) $< > $(DTRACE_SUPPORT_DIR)/$(@F).d ) )
$(call ExecuteWithLog, $@, $(DTRACE) $(DTRACE_FLAGS) -h -o $@ -s $(DTRACE_SUPPORT_DIR)/$(@F).d)
# Process all .d files in DTRACE_SOURCE_DIR. They are:

@@ -68,7 +68,7 @@ ifeq ($(call check-jvm-feature, dtrace), true)
$1: $$(BUILD_DTRACE_GEN_OFFSETS)
$$(call LogInfo, Generating dtrace $2 file $$(@F))
$$(call MakeDir, $$(@D))
$$(call ExecuteWithLog, $$@, $$(DTRACE_GEN_OFFSETS_TOOL) -$$(strip $2) > $$@)
$$(call ExecuteWithLog, $$@, ( $$(DTRACE_GEN_OFFSETS_TOOL) -$$(strip $2) > $$@ ) )
TARGETS += $1
endef

@@ -50,8 +50,9 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/BoolReturn \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
$(HOTSPOT_TOPDIR)/test/compiler/native \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
$(HOTSPOT_TOPDIR)/test/compiler/jvmci/jdk.vm.ci.code.test \
#
# Add conditional directories here when needed.
@@ -64,6 +65,7 @@ endif
ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
endif
BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native

@@ -2434,7 +2434,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ ldrsb(r0, field);
__ push(ztos);
// Rewrite bytecode to be faster
if (!is_static) {
if (rc == may_rewrite) {
// use btos rewriting, no truncating to t/f bit is needed for getfield.
patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
}
@@ -2670,7 +2670,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
if (!is_static) pop_and_check_object(obj);
__ andw(r0, r0, 0x1);
__ strb(r0, field);
if (!is_static) {
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
}
__ b(Done);

@@ -32,7 +32,7 @@ const int StackAlignmentInBytes = (2*wordSize);
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;
const bool CCallingConventionRequiresIntsAsLongs = true;
#define SUPPORTS_NATIVE_CX8

@@ -26,17 +26,41 @@ import static jdk.vm.ci.meta.JavaKind.Void;
import static jdk.vm.ci.meta.Value.ILLEGAL;
import static jdk.vm.ci.sparc.SPARC.REGISTER_SAFE_AREA_SIZE;
import static jdk.vm.ci.sparc.SPARC.d0;
import static jdk.vm.ci.sparc.SPARC.d10;
import static jdk.vm.ci.sparc.SPARC.d12;
import static jdk.vm.ci.sparc.SPARC.d14;
import static jdk.vm.ci.sparc.SPARC.d16;
import static jdk.vm.ci.sparc.SPARC.d18;
import static jdk.vm.ci.sparc.SPARC.d2;
import static jdk.vm.ci.sparc.SPARC.d20;
import static jdk.vm.ci.sparc.SPARC.d22;
import static jdk.vm.ci.sparc.SPARC.d24;
import static jdk.vm.ci.sparc.SPARC.d26;
import static jdk.vm.ci.sparc.SPARC.d28;
import static jdk.vm.ci.sparc.SPARC.d30;
import static jdk.vm.ci.sparc.SPARC.d4;
import static jdk.vm.ci.sparc.SPARC.d6;
import static jdk.vm.ci.sparc.SPARC.d8;
import static jdk.vm.ci.sparc.SPARC.f0;
import static jdk.vm.ci.sparc.SPARC.f1;
import static jdk.vm.ci.sparc.SPARC.f11;
import static jdk.vm.ci.sparc.SPARC.f13;
import static jdk.vm.ci.sparc.SPARC.f15;
import static jdk.vm.ci.sparc.SPARC.f17;
import static jdk.vm.ci.sparc.SPARC.f19;
import static jdk.vm.ci.sparc.SPARC.f2;
import static jdk.vm.ci.sparc.SPARC.f21;
import static jdk.vm.ci.sparc.SPARC.f23;
import static jdk.vm.ci.sparc.SPARC.f25;
import static jdk.vm.ci.sparc.SPARC.f27;
import static jdk.vm.ci.sparc.SPARC.f29;
import static jdk.vm.ci.sparc.SPARC.f3;
import static jdk.vm.ci.sparc.SPARC.f31;
import static jdk.vm.ci.sparc.SPARC.f4;
import static jdk.vm.ci.sparc.SPARC.f5;
import static jdk.vm.ci.sparc.SPARC.f6;
import static jdk.vm.ci.sparc.SPARC.f7;
import static jdk.vm.ci.sparc.SPARC.f9;
import static jdk.vm.ci.sparc.SPARC.g0;
import static jdk.vm.ci.sparc.SPARC.g2;
import static jdk.vm.ci.sparc.SPARC.g6;
@@ -95,11 +119,6 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final RegisterAttributes[] attributesMap;
/**
* Does native code (C++ code) spill arguments in registers to the parent frame?
*/
private final boolean addNativeRegisterArgumentSlots;
@Override
public RegisterArray getAllocatableRegisters() {
return allocatable;
@@ -124,10 +143,18 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
private final RegisterArray cpuCallerParameterRegisters = new RegisterArray(o0, o1, o2, o3, o4, o5);
private final RegisterArray cpuCalleeParameterRegisters = new RegisterArray(i0, i1, i2, i3, i4, i5);
private final RegisterArray fpuFloatParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
private final RegisterArray fpuDoubleParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
private final RegisterArray fpuFloatJavaParameterRegisters = new RegisterArray(f0, f1, f2, f3, f4, f5, f6, f7);
private final RegisterArray fpuDoubleJavaParameterRegisters = new RegisterArray(d0, null, d2, null, d4, null, d6, null);
// @formatter:off
private final RegisterArray fpuFloatNativeParameterRegisters = new RegisterArray(
f1, f3, f5, f7, f9, f11, f13, f15,
f17, f19, f21, f23, f25, f27, f29, f31);
private final RegisterArray fpuDoubleNativeParameterRegisters = new RegisterArray(
d0, d2, d4, d6, d8, d10, d12, d14,
d16, d18, d20, d22, d24, d26, d28, d30);
private final RegisterArray callerSaveRegisters;
/**
@@ -170,7 +197,6 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
public SPARCHotSpotRegisterConfig(TargetDescription target, RegisterArray allocatable) {
this.target = target;
this.allocatable = allocatable;
this.addNativeRegisterArgumentSlots = false;
HashSet<Register> callerSaveSet = new HashSet<>(target.arch.getAvailableValueRegisters().asList());
for (Register cs : windowSaveRegisters) {
callerSaveSet.remove(cs);
@@ -220,7 +246,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
return hotspotType == HotSpotCallingConventionType.JavaCallee ? cpuCalleeParameterRegisters : cpuCallerParameterRegisters;
case Double:
case Float:
return fpuFloatParameterRegisters;
return fpuFloatJavaParameterRegisters;
default:
throw JVMCIError.shouldNotReachHere("Unknown JavaKind " + kind);
}
@@ -233,48 +259,77 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
int currentGeneral = 0;
int currentFloating = 0;
int currentStackOffset = 0;
boolean isNative = type == HotSpotCallingConventionType.NativeCall;
for (int i = 0; i < parameterTypes.length; i++) {
final JavaKind kind = parameterTypes[i].getJavaKind().getStackKind();
switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Double:
if (currentFloating < fpuFloatParameterRegisters.size()) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
if (isNative) {
RegisterArray registerSet;
switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
registerSet = generalParameterRegisters;
break;
case Double:
registerSet = fpuDoubleNativeParameterRegisters;
break;
case Float:
registerSet = fpuFloatNativeParameterRegisters;
break;
default:
throw JVMCIError.shouldNotReachHere();
}
if (i < registerSet.size()) {
locations[i] = registerSet.get(i).asValue(valueKindFactory.getValueKind(kind));
currentStackOffset += target.arch.getWordSize();
}
} else {
switch (kind) {
case Byte:
case Boolean:
case Short:
case Char:
case Int:
case Long:
case Object:
if (currentGeneral < generalParameterRegisters.size()) {
Register register = generalParameterRegisters.get(currentGeneral++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
Register register = fpuDoubleParameterRegisters.get(currentFloating);
currentFloating += 2; // Only every second is a double register
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
if (currentFloating < fpuFloatParameterRegisters.size()) {
Register register = fpuFloatParameterRegisters.get(currentFloating++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
default:
throw JVMCIError.shouldNotReachHere();
break;
case Double:
if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
if (currentFloating % 2 != 0) {
// Make register number even to be a double reg
currentFloating++;
}
Register register = fpuDoubleJavaParameterRegisters.get(currentFloating);
currentFloating += 2; // Only every second is a double register
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
case Float:
if (currentFloating < fpuFloatJavaParameterRegisters.size()) {
Register register = fpuFloatJavaParameterRegisters.get(currentFloating++);
locations[i] = register.asValue(valueKindFactory.getValueKind(kind));
}
break;
default:
throw JVMCIError.shouldNotReachHere();
}
}
if (locations[i] == null) {
ValueKind<?> valueKind = valueKindFactory.getValueKind(kind);
// Stack slot is always aligned to its size in bytes but minimum wordsize
int typeSize = valueKind.getPlatformKind().getSizeInBytes();
if (isNative) {
currentStackOffset += target.arch.getWordSize() - typeSize;
}
currentStackOffset = roundUp(currentStackOffset, typeSize);
int slotOffset = currentStackOffset + REGISTER_SAFE_AREA_SIZE;
locations[i] = StackSlot.get(valueKind, slotOffset, !type.out);
@@ -284,15 +339,7 @@ public class SPARCHotSpotRegisterConfig implements RegisterConfig {
JavaKind returnKind = returnType == null ? Void : returnType.getJavaKind();
AllocatableValue returnLocation = returnKind == Void ? ILLEGAL : getReturnRegister(returnKind, type).asValue(valueKindFactory.getValueKind(returnKind.getStackKind()));
int outArgSpillArea;
if (type == HotSpotCallingConventionType.NativeCall && addNativeRegisterArgumentSlots) {
// Space for native callee which may spill our outgoing arguments
outArgSpillArea = Math.min(locations.length, generalParameterRegisters.size()) * target.wordSize;
} else {
outArgSpillArea = 0;
}
return new CallingConvention(currentStackOffset + outArgSpillArea, returnLocation, locations);
return new CallingConvention(currentStackOffset, returnLocation, locations);
}
private static int roundUp(int number, int mod) {

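In the NativeCall branch added above, a floating-point argument's register is chosen by the argument's overall position i in a fixed list: odd single-precision registers (f1, f3, ..., f31) for float, even double-precision registers (d0, d2, ..., d30) for double, and a stack word is reserved for every argument regardless. A sketch of just the register naming, with plain strings standing in for JVMCI Register objects:

```java
public class SparcNativeFpArgs {
    // f1, f3, ..., f31: the sixteen fpuFloatNativeParameterRegisters above.
    static String floatArgRegister(int i) {
        return i < 16 ? "f" + (2 * i + 1) : "stack";
    }

    // d0, d2, ..., d30: the sixteen fpuDoubleNativeParameterRegisters above.
    static String doubleArgRegister(int i) {
        return i < 16 ? "d" + (2 * i) : "stack";
    }

    public static void main(String[] args) {
        System.out.println(floatArgRegister(0));  // f1
        System.out.println(doubleArgRegister(3)); // d6
    }
}
```
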
@@ -32,6 +32,11 @@ import jdk.vm.ci.services.Services;
final class HotSpotJVMCICompilerConfig {
/**
* This factory allows JVMCI initialization to succeed but raises an error if the VM asks JVMCI
* to perform a compilation. This allows the reflective parts of the JVMCI API to be used
* without requiring a compiler implementation to be available.
*/
private static class DummyCompilerFactory extends JVMCICompilerFactory implements JVMCICompiler {
public HotSpotCompilationRequestResult compileMethod(CompilationRequest request) {
@@ -67,7 +72,6 @@ final class HotSpotJVMCICompilerConfig {
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (f.getCompilerName().equals(compilerName)) {
Services.exportJVMCITo(f.getClass());
f.onSelection();
factory = f;
}
}
@@ -75,8 +79,21 @@ final class HotSpotJVMCICompilerConfig {
throw new JVMCIError("JVMCI compiler '%s' not found", compilerName);
}
} else {
factory = new DummyCompilerFactory();
// Auto select a single available compiler
for (JVMCICompilerFactory f : Services.load(JVMCICompilerFactory.class)) {
if (factory == null) {
factory = f;
} else {
// Multiple factories seen - cancel auto selection
factory = null;
break;
}
}
if (factory == null) {
factory = new DummyCompilerFactory();
}
}
factory.onSelection();
compilerFactory = factory;
}
return compilerFactory;

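The replacement for the unconditional DummyCompilerFactory fallback auto-selects a compiler only when exactly one factory is present; a second candidate cancels auto-selection. The same rule, sketched with java.util.ServiceLoader for illustration (JVMCI uses its own Services loader, so this is an analogue rather than the actual API):

```java
import java.util.ServiceLoader;

public class AutoSelect {
    // Return the sole provider of a service, or the fallback if there are
    // zero providers or more than one (ambiguity cancels auto-selection).
    static <T> T selectSole(Class<T> service, T fallback) {
        T selected = null;
        for (T candidate : ServiceLoader.load(service)) {
            if (selected == null) {
                selected = candidate;   // first candidate: tentatively select
            } else {
                selected = null;        // second candidate: ambiguous
                break;
            }
        }
        return selected != null ? selected : fallback;
    }
}
```
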
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,10 +28,12 @@ import static jdk.vm.ci.hotspot.HotSpotResolvedObjectTypeImpl.fromObjectClass;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.MethodHandleAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.Signature;
public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProvider {
@@ -51,46 +53,80 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
static final ResolvedJavaMethod lambdaFormCompileToBytecodeMethod;
static final HotSpotResolvedJavaField memberNameVmtargetField;
static final ResolvedJavaType CLASS = fromObjectClass(LazyInitialization.class);
/**
* Search for an instance field with the given name in a class.
*
* @param className name of the class to search in
* @param fieldName name of the field to be searched
* @return resolved java field
* @param fieldType resolved Java type of the field
* @return resolved Java field
* @throws ClassNotFoundException
* @throws NoSuchFieldError
*/
private static ResolvedJavaField findFieldInClass(String className, String fieldName) throws ClassNotFoundException {
private static ResolvedJavaField findFieldInClass(String className, String fieldName, ResolvedJavaType fieldType)
throws ClassNotFoundException {
Class<?> clazz = Class.forName(className);
ResolvedJavaType type = runtime().fromClass(clazz);
ResolvedJavaField[] fields = type.getInstanceFields(false);
for (ResolvedJavaField field : fields) {
if (field.getName().equals(fieldName)) {
if (field.getName().equals(fieldName) && field.getType().equals(fieldType)) {
return field;
}
}
return null;
throw new NoSuchFieldError(fieldType.getName() + " " + className + "." + fieldName);
}
private static ResolvedJavaMethod findMethodInClass(String className, String methodName) throws ClassNotFoundException {
private static ResolvedJavaMethod findMethodInClass(String className, String methodName,
ResolvedJavaType resultType, ResolvedJavaType[] parameterTypes) throws ClassNotFoundException {
Class<?> clazz = Class.forName(className);
HotSpotResolvedObjectTypeImpl type = fromObjectClass(clazz);
ResolvedJavaMethod result = null;
for (ResolvedJavaMethod method : type.getDeclaredMethods()) {
if (method.getName().equals(methodName)) {
assert result == null : "more than one method found: " + className + "." + methodName;
if (method.getName().equals(methodName) && signatureMatches(method, resultType, parameterTypes)) {
result = method;
}
}
assert result != null : "method not found: " + className + "." + methodName;
if (result == null) {
StringBuilder sig = new StringBuilder("(");
for (ResolvedJavaType t : parameterTypes) {
sig.append(t.getName()).append(",");
}
if (sig.length() > 1) {
sig.replace(sig.length() - 1, sig.length(), ")");
} else {
sig.append(')');
}
throw new NoSuchMethodError(resultType.getName() + " " + className + "." + methodName + sig.toString());
}
return result;
}
private static boolean signatureMatches(ResolvedJavaMethod m, ResolvedJavaType resultType,
ResolvedJavaType[] parameterTypes) {
Signature s = m.getSignature();
if (!s.getReturnType(CLASS).equals(resultType)) {
return false;
}
for (int i = 0; i < s.getParameterCount(false); ++i) {
if (!s.getParameterType(i, CLASS).equals(parameterTypes[i])) {
return false;
}
}
return true;
}
static {
try {
methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form");
lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry");
lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode");
memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget");
methodHandleFormField = findFieldInClass("java.lang.invoke.MethodHandle", "form",
fromObjectClass(Class.forName("java.lang.invoke.LambdaForm")));
lambdaFormVmentryField = findFieldInClass("java.lang.invoke.LambdaForm", "vmentry",
fromObjectClass(Class.forName("java.lang.invoke.MemberName")));
lambdaFormCompileToBytecodeMethod = findMethodInClass("java.lang.invoke.LambdaForm", "compileToBytecode",
new HotSpotResolvedPrimitiveType(JavaKind.Void), new ResolvedJavaType[]{});
memberNameVmtargetField = (HotSpotResolvedJavaField) findFieldInClass("java.lang.invoke.MemberName", "vmtarget",
new HotSpotResolvedPrimitiveType(JavaKind.Long));
} catch (Throwable ex) {
throw new JVMCIError(ex);
}
@@ -134,14 +170,12 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
return null;
}
JavaConstant memberName;
if (forceBytecodeGeneration) {
/* Invoke non-public method: MemberName LambdaForm.compileToBytecode() */
memberName = LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
} else {
/* Load non-public field: MemberName LambdaForm.vmentry */
memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
LazyInitialization.lambdaFormCompileToBytecodeMethod.invoke(lambdaForm, new JavaConstant[0]);
}
/* Load non-public field: MemberName LambdaForm.vmentry */
JavaConstant memberName = constantReflection.readFieldValue(LazyInitialization.lambdaFormVmentryField, lambdaForm);
return getTargetMethod(memberName);
}
@@ -163,3 +197,4 @@ public class HotSpotMethodHandleAccessProvider implements MethodHandleAccessProv
return compilerToVM().getResolvedJavaMethod(object, LazyInitialization.memberNameVmtargetField.offset());
}
}

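The lookup changes above make JVMCI resolve java.lang.invoke internals by declared type as well as name, and fail fast with NoSuchFieldError/NoSuchMethodError instead of returning null or relying on asserts. The field half has a direct core-reflection analogue; a hedged sketch (not the JVMCI code, which works on ResolvedJavaField):

```java
import java.lang.reflect.Field;

public class TypedFieldLookup {
    static Field findField(String className, String fieldName, Class<?> fieldType)
            throws ClassNotFoundException {
        for (Field f : Class.forName(className).getDeclaredFields()) {
            // Match on type as well as name, so a renamed or retyped JDK
            // internal is caught immediately rather than much later.
            if (f.getName().equals(fieldName) && f.getType() == fieldType) {
                return f;
            }
        }
        // Fail loudly instead of returning null, as the patch does.
        throw new NoSuchFieldError(fieldType.getName() + " " + className + "." + fieldName);
    }
}
```
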
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,17 +22,49 @@
*
*/
// This is only a stub. Will flesh out later when/if we add further support
// for PASE.
#include "libo4.hpp"
bool libo4::init() { return false; }
void libo4::cleanup() {}
bool libo4::get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free) {
// global variables
// whether initialization worked
static bool g_initialized = false;
//////////////////////////
// class libo4 - impl //
//////////////////////////
bool libo4::init() {
if (g_initialized) {
return true;
}
return false;
}
bool libo4::get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15) { return false; }
bool libo4::realpath (const char* file_name, char* resolved_name, int resolved_name_len) { return false; }
void libo4::cleanup() {
if (g_initialized) {
g_initialized = false;
}
}
bool libo4::get_memory_info(unsigned long long* p_virt_total,
unsigned long long* p_real_total,
unsigned long long* p_real_free,
unsigned long long* p_pgsp_total,
unsigned long long* p_pgsp_free) {
return false;
}
bool libo4::get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15) {
return false;
}
bool libo4::realpath(const char* file_name, char* resolved_name,
int resolved_name_len) {
return false;
}
bool libo4::removeEscapeMessageFromJoblogByContext(const void* context) {
// Note: no tracing here! We run in signal handling context
return false;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,56 +22,69 @@
*
*/
// A C++ wrapper around the libo4 porting library. The libo4 porting library
// is a set of bridge functions into native AS/400 functionality.
// Class libo4 is a C++ wrapper around the libo4 porting library. It handles
// basic stuff like dynamic loading, library initialization etc.
// The libo4 porting library is a set of functions that bridge from the AIX
// runtime environment on OS/400 (aka PASE layer) into native OS/400
// functionality (aka ILE layer) to close some functional gaps that exist in
// the PASE layer.
#ifndef OS_AIX_VM_LIBO4_HPP
#define OS_AIX_VM_LIBO4_HPP
class libo4 {
public:
// Initialize the libo4 porting library.
// Returns true if succeeded, false if error.
static bool init();
// cleanup of the libo4 porting library.
// Triggers cleanup of the libo4 porting library.
static void cleanup();
// returns a number of memory statistics from the
// AS/400.
// Returns a number of memory statistics from OS/400.
//
// See libo4.h for details on this API.
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free);
static bool get_memory_info(unsigned long long* p_virt_total,
unsigned long long* p_real_total,
unsigned long long* p_real_free,
unsigned long long* p_pgsp_total,
unsigned long long* p_pgsp_free);
// returns information about system load
// Returns information about system load
// (similar to "loadavg()" under other Unices)
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15);
// this is a replacement for the "realpath()" API which does not really work
// on PASE
// See libo4.h for details on this API.
//
// Specify NULL for numbers you are not interested in.
//
// returns false if an error happened. Activate OsMisc trace for
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool realpath (const char* file_name,
char* resolved_name, int resolved_name_len);
static bool get_load_avg(double* p_avg1, double* p_avg5, double* p_avg15);
// This is a replacement for the "realpath()" API which does not really work
// in PASE together with the (case insensitive but case preserving)
// filesystem on OS/400.
//
// See libo4.h for details on this API.
//
// Returns false if an error happened. Activate OsMisc trace for
// trace output.
//
static bool realpath(const char* file_name, char* resolved_name,
int resolved_name_len);
// Call libo4_RemoveEscapeMessageFromJoblogByContext API to remove messages
// from the OS/400 job log.
//
// See libo4.h for details on this API.
static bool removeEscapeMessageFromJoblogByContext(const void* context);
};
#endif // OS_AIX_VM_LIBO4_HPP

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -180,10 +180,12 @@ bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
memset (&psct, '\0', sizeof(psct));
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(PERFSTAT_CPU_TOTAL_T_LATEST), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_71), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
trcVerbose("perfstat_cpu_total() failed (errno=%d)", errno);
return false;
}
}
}
}

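The extra nesting above adds the AIX 7.1 struct layout to an existing newest-to-oldest probe: perfstat_cpu_total() is retried with progressively older structure sizes until the running libperfstat accepts one. The pattern in isolation, as a hedged Java sketch (names are illustrative):

```java
import java.util.function.IntPredicate;

public class AbiProbe {
    // Try candidate struct sizes from newest to oldest and return the first
    // one the library accepts, or -1 if none does (mirrors the nested
    // perfstat_cpu_total retries above).
    static int firstAccepted(int[] sizesNewestFirst, IntPredicate accepts) {
        for (int size : sizesNewestFirst) {
            if (accepts.test(size)) {
                return size;
            }
        }
        return -1;
    }
}
```
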
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -337,10 +337,109 @@ typedef struct { /* global cpu information AIX 7.1 */
int spurrflag; /* set if running in spurr mode */
u_longlong_t version; /* version number (1, 2, etc.,) */
/* >>>>> END OF STRUCTURE DEFINITION <<<<< */
#define CURR_VERSION_CPU_TOTAL 1 /* Incremented by one for every new release *
/* #define CURR_VERSION_CPU_TOTAL 1 Incremented by one for every new release *
* of perfstat_cpu_total_t data structure */
} perfstat_cpu_total_t_71;
typedef struct { /* global cpu information AIX 7.2 / 6.1 TL6 (see oslevel -r) */
int ncpus; /* number of active logical processors */
int ncpus_cfg; /* number of configured processors */
char description[IDENTIFIER_LENGTH]; /* processor description (type/official name) */
u_longlong_t processorHZ; /* processor speed in Hz */
u_longlong_t user; /* raw total number of clock ticks spent in user mode */
u_longlong_t sys; /* raw total number of clock ticks spent in system mode */
u_longlong_t idle; /* raw total number of clock ticks spent idle */
u_longlong_t wait; /* raw total number of clock ticks spent waiting for I/O */
u_longlong_t pswitch; /* number of process switches (change in currently running process) */
u_longlong_t syscall; /* number of system calls executed */
u_longlong_t sysread; /* number of read system calls executed */
u_longlong_t syswrite; /* number of write system calls executed */
u_longlong_t sysfork; /* number of forks system calls executed */
u_longlong_t sysexec; /* number of execs system calls executed */
u_longlong_t readch; /* number of characters transferred with read system call */
u_longlong_t writech; /* number of characters transferred with write system call */
u_longlong_t devintrs; /* number of device interrupts */
u_longlong_t softintrs; /* number of software interrupts */
time_t lbolt; /* number of ticks since last reboot */
u_longlong_t loadavg[3]; /* (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes. */
/* To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
u_longlong_t runque; /* length of the run queue (processes ready) */
u_longlong_t swpque; /* length of the swap queue (processes waiting to be paged in) */
u_longlong_t bread; /* number of blocks read */
u_longlong_t bwrite; /* number of blocks written */
u_longlong_t lread; /* number of logical read requests */
u_longlong_t lwrite; /* number of logical write requests */
u_longlong_t phread; /* number of physical reads (reads on raw devices) */
u_longlong_t phwrite; /* number of physical writes (writes on raw devices) */
u_longlong_t runocc; /* updated whenever runque is updated, i.e. the runqueue is occupied.
* This can be used to compute the simple average of ready processes */
u_longlong_t swpocc; /* updated whenever swpque is updated. i.e. the swpqueue is occupied.
* This can be used to compute the simple average processes waiting to be paged in */
u_longlong_t iget; /* number of inode lookups */
u_longlong_t namei; /* number of vnode lookup from a path name */
u_longlong_t dirblk; /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
u_longlong_t msg; /* number of IPC message operations */
u_longlong_t sema; /* number of IPC semaphore operations */
u_longlong_t rcvint; /* number of tty receive interrupts */
u_longlong_t xmtint; /* number of tty transmit interrupts */
u_longlong_t mdmint; /* number of modem interrupts */
u_longlong_t tty_rawinch; /* number of raw input characters */
u_longlong_t tty_caninch; /* number of canonical input characters (always zero) */
u_longlong_t tty_rawoutch;/* number of raw output characters */
u_longlong_t ksched; /* number of kernel processes created */
u_longlong_t koverf; /* kernel process creation attempts where:
* -the user has forked to their maximum limit
* -the configuration limit of processes has been reached */
u_longlong_t kexit; /* number of kernel processes that became zombies */
u_longlong_t rbread; /* number of remote read requests */
u_longlong_t rcread; /* number of cached remote reads */
u_longlong_t rbwrt; /* number of remote writes */
u_longlong_t rcwrt; /* number of cached remote writes */
u_longlong_t traps; /* number of traps */
int ncpus_high; /* index of highest processor online */
u_longlong_t puser; /* raw number of physical processor tics in user mode */
u_longlong_t psys; /* raw number of physical processor tics in system mode */
u_longlong_t pidle; /* raw number of physical processor tics idle */
u_longlong_t pwait; /* raw number of physical processor tics waiting for I/O */
u_longlong_t decrintrs; /* number of decrementer tics interrupts */
u_longlong_t mpcrintrs; /* number of mpc's received interrupts */
u_longlong_t mpcsintrs; /* number of mpc's sent interrupts */
u_longlong_t phantintrs; /* number of phantom interrupts */
u_longlong_t idle_donated_purr; /* number of idle cycles donated by a dedicated partition enabled for donation */
u_longlong_t idle_donated_spurr;/* number of idle spurr cycles donated by a dedicated partition enabled for donation */
u_longlong_t busy_donated_purr; /* number of busy cycles donated by a dedicated partition enabled for donation */
u_longlong_t busy_donated_spurr;/* number of busy spurr cycles donated by a dedicated partition enabled for donation */
u_longlong_t idle_stolen_purr; /* number of idle cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t idle_stolen_spurr; /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t busy_stolen_purr; /* number of busy cycles stolen by the hypervisor from a dedicated partition */
u_longlong_t busy_stolen_spurr; /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
short iowait; /* number of processes that are asleep waiting for buffered I/O */
short physio; /* number of processes waiting for raw I/O */
longlong_t twait; /* number of threads that are waiting for filesystem direct(cio) */
u_longlong_t hpi; /* number of hypervisor page-ins */
u_longlong_t hpit; /* Time spent in hypervisor page-ins (in nanoseconds) */
u_longlong_t puser_spurr; /* number of spurr cycles spent in user mode */
u_longlong_t psys_spurr; /* number of spurr cycles spent in kernel mode */
u_longlong_t pidle_spurr; /* number of spurr cycles spent in idle mode */
u_longlong_t pwait_spurr; /* number of spurr cycles spent in wait mode */
int spurrflag; /* set if running in spurr mode */
u_longlong_t version; /* version number (1, 2, etc.,) */
u_longlong_t tb_last; /*time base counter */
u_longlong_t purr_coalescing; /* If the calling partition is
* authorized to see pool wide statistics then
* PURR cycles consumed to coalesce data
* else set to zero.*/
u_longlong_t spurr_coalescing; /* If the calling partition is
* authorized to see pool wide statistics then
* SPURR cycles consumed to coalesce data
* else set to zero.*/
/* >>>>> END OF STRUCTURE DEFINITION <<<<< */
#define CURR_VERSION_CPU_TOTAL 2 /* Incremented by one for every new release *
* of perfstat_cpu_total_t data structure */
} perfstat_cpu_total_t_72;
typedef union {
uint w;
struct {
@@ -756,7 +855,7 @@ typedef struct { /* WPAR identifier */
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define PERFSTAT_PARTITON_TOTAL_T_LATEST perfstat_partition_total_t_71_1/* latest perfstat_partition_total_t structure */
#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_71 /* latest perfstat_cpu_total_t structure */
#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_72 /* latest perfstat_cpu_total_t structure */
#define PERFSTAT_WPAR_TOTAL_T_LATEST perfstat_wpar_total_t_71 /* latest perfstat_wpar_total_t structure */
class libperfstat {

@@ -1742,11 +1742,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
typedef struct {
Elf32_Half code; // Actual value as defined in elf.h
Elf32_Half compat_class; // Compatibility of archs at VM's sense
char elf_class; // 32 or 64 bit
char endianess; // MSB or LSB
char* name; // String representation
Elf32_Half code; // Actual value as defined in elf.h
Elf32_Half compat_class; // Compatibility of archs at VM's sense
unsigned char elf_class; // 32 or 64 bit
unsigned char endianess; // MSB or LSB
char* name; // String representation
} arch_t;
#ifndef EM_486

@@ -1320,36 +1320,8 @@ bool os::getTimesSecs(double* process_real_time,
}
bool os::supports_vtime() { return true; }
bool os::enable_vtime() {
int fd = ::open("/proc/self/ctl", O_WRONLY);
if (fd == -1) {
return false;
}
long cmd[] = { PCSET, PR_MSACCT };
int res = ::write(fd, cmd, sizeof(long) * 2);
::close(fd);
if (res != sizeof(long) * 2) {
return false;
}
return true;
}
bool os::vtime_enabled() {
int fd = ::open("/proc/self/status", O_RDONLY);
if (fd == -1) {
return false;
}
pstatus_t status;
int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
::close(fd);
if (res != sizeof(pstatus_t)) {
return false;
}
return status.pr_flags & PR_MSACCT;
}
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
return (double)gethrvtime() / (double)hrtime_hz;

@@ -5250,6 +5250,12 @@ int os::fork_and_exec(char* cmd) {
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// For debugging possible bugs inside HeapWalk (a ring buffer)
#define SAVE_COUNT 8
static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
static int saved_heap_entry_index;
bool os::check_heap(bool force) {
if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
@@ -5270,13 +5276,28 @@ bool os::check_heap(bool force) {
if (HeapLock(heap) != 0) {
PROCESS_HEAP_ENTRY phe;
phe.lpData = NULL;
memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
saved_heap_entry_index = 0;
int count = 0;
while (HeapWalk(heap, &phe) != 0) {
count ++;
if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
!HeapValidate(heap, 0, phe.lpData)) {
tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
HeapUnlock(heap);
fatal("corrupted C heap");
} else {
// Save previous seen entries in a ring buffer. We have seen strange
// heap corruption fatal errors that produced mdmp files, but when we load
// these mdmp files in WinDBG, "!heap -triage" shows no error.
// We can examine the saved_heap_entries[] array in the mdmp file to
// diagnose such seemingly spurious errors reported by HeapWalk.
saved_heap_entries[saved_heap_entry_index++] = phe;
if (saved_heap_entry_index >= SAVE_COUNT) {
saved_heap_entry_index = 0;
}
}
}
DWORD err = GetLastError();

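saved_heap_entries[] above is a plain fixed-size ring buffer: the last SAVE_COUNT entries returned by HeapWalk survive into the minidump, so a corruption report that WinDBG cannot reproduce can still be triaged. The pattern itself, sketched generically (illustrative Java, not HotSpot code):

```java
public class RingBuffer<T> {
    private final Object[] slots;   // fixed capacity, like SAVE_COUNT
    private int next;               // like saved_heap_entry_index

    RingBuffer(int capacity) {
        slots = new Object[capacity];
    }

    void record(T value) {
        slots[next++] = value;      // overwrite the oldest entry
        if (next >= slots.length) {
            next = 0;               // wrap around
        }
    }
}
```
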
@@ -204,11 +204,13 @@ ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
}
ciEnv::~ciEnv() {
CompilerThread* current_thread = CompilerThread::current();
_factory->remove_symbols();
// Need safepoint to clear the env on the thread. RedefineClasses might
// be reading it.
GUARDED_VM_ENTRY(current_thread->set_env(NULL);)
GUARDED_VM_ENTRY(
CompilerThread* current_thread = CompilerThread::current();
_factory->remove_symbols();
// Need safepoint to clear the env on the thread. RedefineClasses might
// be reading it.
current_thread->set_env(NULL);
)
}
// ------------------------------------------------------------------

@@ -490,7 +490,8 @@ class CompileReplay : public StackObj {
int comp_level = parse_int(comp_level_label);
// old version w/o comp_level
if (had_error() && (error_message() == comp_level_label)) {
comp_level = CompLevel_full_optimization;
// use highest available tier
comp_level = TieredCompilation ? TieredStopAtLevel : CompLevel_highest_tier;
}
if (!is_valid_comp_level(comp_level)) {
return;

@@ -224,7 +224,7 @@ static const jchar ONE_CHAR[] = { (jchar) 0x8180};
static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
static const jint ONE_INT[] = { 0x83828180};
static const jint ONE_INT[] = { (jint)0x83828180};
static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
static const jbyte EIGHT_BYTE[] = {
@@ -235,7 +235,7 @@ static const jchar FOUR_CHAR[] = {
(jchar) 0x8180, (jchar) 0x8382,
(jchar) 0x8584, (jchar) 0x8786};
static const jint TWO_INT[] = { 0x83828180, 0x87868584};
static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;

@@ -142,7 +142,9 @@ void ClassLoaderData::oops_do(OopClosure* f, KlassClosure* klass_closure, bool m
f->do_oop(&_class_loader);
_dependencies.oops_do(f);
_handles->oops_do(f);
if (_handles != NULL) {
_handles->oops_do(f);
}
if (klass_closure != NULL) {
classes_do(klass_closure);
}
@@ -501,13 +503,26 @@ ClassLoaderData::~ClassLoaderData() {
}
}
/**
* Returns true if this class loader data is for the platform class loader.
*/
// Returns true if this class loader data is for the system class loader.
bool ClassLoaderData::is_system_class_loader_data() const {
return SystemDictionary::is_system_class_loader(class_loader());
}
// Returns true if this class loader data is for the platform class loader.
bool ClassLoaderData::is_platform_class_loader_data() const {
return SystemDictionary::is_platform_class_loader(class_loader());
}
// Returns true if this class loader data is one of the 3 builtin
// (boot, application/system or platform) class loaders. Note, the
// builtin loaders are not freed by a GC.
bool ClassLoaderData::is_builtin_class_loader_data() const {
Handle classLoaderHandle = class_loader();
return (is_the_null_class_loader_data() ||
SystemDictionary::is_system_class_loader(classLoaderHandle) ||
SystemDictionary::is_platform_class_loader(classLoaderHandle));
}
Metaspace* ClassLoaderData::metaspace_non_null() {
assert(!DumpSharedSpaces, "wrong metaspace!");
// If the metaspace has not been allocated, create a new one. Might want
@@ -957,12 +972,6 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
data = _head;
while (data != NULL) {
if (data->is_alive(is_alive_closure)) {
if (data->packages_defined()) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
}
// clean metaspace
if (walk_all_metadata) {
data->classes_do(InstanceKlass::purge_previous_versions);
@@ -990,6 +999,23 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
}
if (seen_dead_loader) {
// Walk a ModuleEntry's reads and a PackageEntry's exports lists
// to determine if there are modules on those lists that are now
// dead and should be removed. A module's life cycle is equivalent
// to its defining class loader's life cycle. Since a module is
// considered dead if its class loader is dead, these walks must
// occur after each class loader's aliveness is determined.
data = _head;
while (data != NULL) {
if (data->packages_defined()) {
data->packages()->purge_all_package_exports();
}
if (data->modules_defined()) {
data->modules()->purge_all_module_reads();
}
data = data->next();
}
post_class_unload_events();
}

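The relocation above turns class unloading into two passes: every ClassLoaderData's aliveness is settled first, and only then are module reads lists and package exports lists purged, since a module is dead exactly when its defining class loader is dead. A compressed sketch of that shape, using hypothetical stand-in types rather than the real ClassLoaderDataGraph:

// Hypothetical stand-ins; the real types live in classLoaderData.hpp.
struct LoaderData {
  bool alive;
  bool has_modules;
  LoaderData* next;
  void purge_module_lists() { /* drop read/export edges to dead modules */ }
};

static void do_unloading(LoaderData* head) {
  bool seen_dead_loader = false;
  // Pass 1: decide aliveness for every loader before touching module lists.
  for (LoaderData* d = head; d != nullptr; d = d->next) {
    if (!d->alive) {
      seen_dead_loader = true;
    }
  }
  // Pass 2: "module is dead iff its loader is dead" is now a stable
  // predicate, so the reads and exports lists can be purged safely.
  if (seen_dead_loader) {
    for (LoaderData* d = head; d != nullptr; d = d->next) {
      if (d->has_modules) {
        d->purge_module_lists();
      }
    }
  }
}

int main() {
  LoaderData dead = { false, true, nullptr };
  LoaderData live = { true, true, &dead };
  do_unloading(&live);
  return 0;
}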
@ -270,7 +270,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool is_the_null_class_loader_data() const {
return this == _the_null_class_loader_data;
}
bool is_system_class_loader_data() const;
bool is_platform_class_loader_data() const;
bool is_builtin_class_loader_data() const;
// The Metaspace is created lazily so may be NULL. This
// method will allocate a Metaspace if needed.

@ -248,7 +248,7 @@ inline void SimpleCompactHashtable::iterate(const I& iterator) {
} else {
u4*entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]);
while (entry < entry_max) {
iterator.do_value(_base_address, entry[0]);
iterator.do_value(_base_address, entry[1]);
entry += 2;
}
}

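This one-line fix matters because the loop strides by two u4 words per entry: judging from that stride, entry[0] holds the hash and entry[1] the value that do_value() expects, so the old code was handing the hash to the iterator. A toy model of the paired layout, with invented names standing in for the real shared-archive structures:

#include <cstdint>
#include <cstdio>

typedef uint32_t u4;

// Toy bucket payload: { hash, value, hash, value, ... }
static const u4 entries[] = { 0xCAFE, 100, 0xBEEF, 200 };

struct PrintIterator {
  void do_value(const char* base, u4 offset) const {
    std::printf("value at base %p + offset %u\n",
                static_cast<const void*>(base), offset);
  }
};

int main() {
  const char* base_address = nullptr;  // stand-in for _base_address
  PrintIterator it;
  const u4* entry = entries;
  const u4* entry_max = entries + sizeof(entries) / sizeof(entries[0]);
  while (entry < entry_max) {
    // entry[0] is the hash; the payload passed on is entry[1].
    it.do_value(base_address, entry[1]);
    entry += 2;
  }
  return 0;
}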
@ -871,12 +871,17 @@ void java_lang_Class::fixup_module_field(KlassHandle k, Handle module) {
int java_lang_Class::oop_size(oop java_class) {
assert(_oop_size_offset != 0, "must be set");
return java_class->int_field(_oop_size_offset);
int size = java_class->int_field(_oop_size_offset);
assert(size > 0, "Oop size must be greater than zero, not %d", size);
return size;
}
void java_lang_Class::set_oop_size(oop java_class, int size) {
assert(_oop_size_offset != 0, "must be set");
assert(size > 0, "Oop size must be greater than zero, not %d", size);
java_class->int_field_put(_oop_size_offset, size);
}
int java_lang_Class::static_oop_field_count(oop java_class) {
assert(_static_oop_field_count_offset != 0, "must be set");
return java_class->int_field(_static_oop_field_count_offset);

@ -275,7 +275,6 @@ class java_lang_Class : AllStatic {
static int static_oop_field_count(oop java_class);
static void set_static_oop_field_count(oop java_class, int size);
static GrowableArray<Klass*>* fixup_mirror_list() {
return _fixup_mirror_list;
}

@ -40,7 +40,6 @@
ModuleEntry* ModuleEntryTable::_javabase_module = NULL;
void ModuleEntry::set_location(Symbol* location) {
if (_location != NULL) {
// _location symbol's refcounts are managed by ModuleEntry,
@ -115,10 +114,35 @@ void ModuleEntry::add_read(ModuleEntry* m) {
// Lazily create a module's reads list
_reads = new (ResourceObj::C_HEAP, mtModule)GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, true);
}
// Determine, based on this newly established read edge to module m,
// if this module's read list should be walked at a GC safepoint.
set_read_walk_required(m->loader_data());
// Establish readability to module m
_reads->append_if_missing(m);
}
}
// If the loader of the module that a read edge is being established to is
// neither this module's loader nor one of the 3 builtin class loaders,
// then this module's reads list must be walked at a GC safepoint.
// Modules have the same life cycle as their defining class loaders and
// should be removed if dead.
void ModuleEntry::set_read_walk_required(ClassLoaderData* m_loader_data) {
assert_locked_or_safepoint(Module_lock);
if (!_must_walk_reads &&
loader_data() != m_loader_data &&
!m_loader_data->is_builtin_class_loader_data()) {
_must_walk_reads = true;
if (log_is_enabled(Trace, modules)) {
ResourceMark rm;
log_trace(modules)("ModuleEntry::set_read_walk_required(): module %s reads list must be walked",
(name() != NULL) ? name()->as_C_string() : UNNAMED_MODULE);
}
}
}
bool ModuleEntry::has_reads() const {
assert_locked_or_safepoint(Module_lock);
return ((_reads != NULL) && !_reads->is_empty());
@ -127,14 +151,28 @@ bool ModuleEntry::has_reads() const {
// Purge dead module entries out of reads list.
void ModuleEntry::purge_reads() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (has_reads()) {
if (_must_walk_reads && has_reads()) {
// This module's _must_walk_reads flag will be reset based
// on the remaining live modules on the reads list.
_must_walk_reads = false;
if (log_is_enabled(Trace, modules)) {
ResourceMark rm;
log_trace(modules)("ModuleEntry::purge_reads(): module %s reads list being walked",
(name() != NULL) ? name()->as_C_string() : UNNAMED_MODULE);
}
// Go backwards because this removes entries that are dead.
int len = _reads->length();
for (int idx = len - 1; idx >= 0; idx--) {
ModuleEntry* module_idx = _reads->at(idx);
ClassLoaderData* cld = module_idx->loader();
if (cld->is_unloading()) {
ClassLoaderData* cld_idx = module_idx->loader_data();
if (cld_idx->is_unloading()) {
_reads->delete_at(idx);
} else {
// Update the need to walk this module's reads based on live modules
set_read_walk_required(cld_idx);
}
}
}
@ -248,7 +286,7 @@ ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle
entry->set_module(loader_data->add_handle(module_handle));
}
entry->set_loader(loader_data);
entry->set_loader_data(loader_data);
entry->set_version(version);
entry->set_location(location);
@ -375,11 +413,11 @@ void ModuleEntryTable::print(outputStream* st) {
void ModuleEntry::print(outputStream* st) {
ResourceMark rm;
st->print_cr("entry "PTR_FORMAT" name %s module "PTR_FORMAT" loader %s version %s location %s strict %s next "PTR_FORMAT,
st->print_cr("entry " PTR_FORMAT " name %s module " PTR_FORMAT " loader %s version %s location %s strict %s next " PTR_FORMAT,
p2i(this),
name() == NULL ? UNNAMED_MODULE : name()->as_C_string(),
p2i(module()),
loader()->loader_name(),
loader_data()->loader_name(),
version() != NULL ? version()->as_C_string() : "NULL",
location() != NULL ? location()->as_C_string() : "NULL",
BOOL_TO_STR(!can_read_all_unnamed()), p2i(next()));
@ -401,5 +439,5 @@ void ModuleEntryTable::verify() {
}
void ModuleEntry::verify() {
guarantee(loader() != NULL, "A module entry must be associated with a loader.");
guarantee(loader_data() != NULL, "A module entry must be associated with a loader.");
}

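The new _must_walk_reads flag filters purge work: a reads list needs a safepoint walk only if some read edge targets a module whose defining loader is neither this module's loader nor one of the 3 builtin loaders, because builtin loaders are never unloaded. purge_reads() clears the flag and rebuilds it from the surviving edges, so a module whose risky edges have all died stops being walked. A distilled sketch of both sides, under hypothetical simplified types:

#include <vector>

// Hypothetical stand-ins for ClassLoaderData and ModuleEntry.
struct Loader {
  bool builtin;
  bool unloading;
};

struct Module {
  Loader* loader;
  std::vector<Module*> reads;
  bool must_walk_reads;

  explicit Module(Loader* l) : loader(l), must_walk_reads(false) {}

  // Mirrors the shape of ModuleEntry::set_read_walk_required().
  void set_read_walk_required(Loader* target_loader) {
    if (!must_walk_reads &&
        loader != target_loader &&
        !target_loader->builtin) {
      must_walk_reads = true;
    }
  }

  void add_read(Module* m) {
    set_read_walk_required(m->loader);
    reads.push_back(m);
  }

  // Mirrors ModuleEntry::purge_reads(): reset the flag, then rebuild it
  // from the live edges so a quiescent module stops being walked.
  void purge_reads() {
    if (!must_walk_reads || reads.empty()) {
      return;
    }
    must_walk_reads = false;
    for (int idx = (int)reads.size() - 1; idx >= 0; idx--) {
      if (reads[idx]->loader->unloading) {
        reads.erase(reads.begin() + idx);
      } else {
        set_read_walk_required(reads[idx]->loader);
      }
    }
  }
};

int main() {
  Loader boot = { true, false };
  Loader user = { false, false };
  Module base(&boot), app(&user);
  app.add_read(&base);   // edge to a builtin loader: no walk required
  base.add_read(&app);   // edge to a non-builtin, foreign loader: walk required
  base.purge_reads();
  return 0;
}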
@ -43,6 +43,7 @@ class ModuleClosure;
// It contains:
// - Symbol* containing the module's name.
// - pointer to the java.lang.reflect.Module for this module.
// - pointer to the java.security.ProtectionDomain shared by classes defined to this module.
// - ClassLoaderData*, class loader of this module.
// - a growable array containing other module entries that this module can read.
// - a flag indicating if this module can read all unnamed modules.
@ -54,56 +55,58 @@ private:
jobject _module; // java.lang.reflect.Module
jobject _pd; // java.security.ProtectionDomain, cached
// for shared classes from this module
ClassLoaderData* _loader;
ClassLoaderData* _loader_data;
GrowableArray<ModuleEntry*>* _reads; // list of modules that are readable by this module
Symbol* _version; // module version number
Symbol* _location; // module location
bool _can_read_all_unnamed;
bool _has_default_read_edges; // JVMTI redefine/retransform support
bool _must_walk_reads; // walk module's reads list at GC safepoints to purge out dead modules
TRACE_DEFINE_TRACE_ID_FIELD;
enum {MODULE_READS_SIZE = 101}; // Initial size of list of modules that the module can read.
public:
void init() {
_module = NULL;
_loader = NULL;
_loader_data = NULL;
_pd = NULL;
_reads = NULL;
_version = NULL;
_location = NULL;
_can_read_all_unnamed = false;
_has_default_read_edges = false;
_must_walk_reads = false;
}
Symbol* name() const { return literal(); }
void set_name(Symbol* n) { set_literal(n); }
Symbol* name() const { return literal(); }
void set_name(Symbol* n) { set_literal(n); }
jobject module() const { return _module; }
void set_module(jobject j) { _module = j; }
jobject module() const { return _module; }
void set_module(jobject j) { _module = j; }
// The shared ProtectionDomain reference is set once the VM loads a shared class
// originated from the current Module. The referenced ProtectionDomain object is
// created by the ClassLoader when loading a class (shared or non-shared) from the
// Module for the first time. This ProtectionDomain object is used for all
// classes from the Module loaded by the same ClassLoader.
Handle shared_protection_domain();
void set_shared_protection_domain(ClassLoaderData *loader_data,
Handle pd);
Handle shared_protection_domain();
void set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd);
ClassLoaderData* loader() const { return _loader; }
void set_loader(ClassLoaderData* l) { _loader = l; }
ClassLoaderData* loader_data() const { return _loader_data; }
void set_loader_data(ClassLoaderData* l) { _loader_data = l; }
Symbol* version() const { return _version; }
void set_version(Symbol* version);
Symbol* version() const { return _version; }
void set_version(Symbol* version);
Symbol* location() const { return _location; }
void set_location(Symbol* location);
Symbol* location() const { return _location; }
void set_location(Symbol* location);
bool can_read(ModuleEntry* m) const;
bool has_reads() const;
void add_read(ModuleEntry* m);
bool can_read(ModuleEntry* m) const;
bool has_reads() const;
void add_read(ModuleEntry* m);
void set_read_walk_required(ClassLoaderData* m_loader_data);
bool is_named() const { return (literal() != NULL); }
bool is_named() const { return (name() != NULL); }
bool can_read_all_unnamed() const {
assert(is_named() || _can_read_all_unnamed == true,
@ -178,7 +181,7 @@ private:
ModuleEntry* _unnamed_module;
ModuleEntry* new_entry(unsigned int hash, Handle module_handle, Symbol* name, Symbol* version,
Symbol* location, ClassLoaderData* class_loader);
Symbol* location, ClassLoaderData* loader_data);
void add_entry(int index, ModuleEntry* new_entry);
int entry_size() const { return BasicHashtable<mtModule>::entry_size(); }

@ -113,7 +113,7 @@ static PackageEntry* get_package_entry(ModuleEntry* module_entry, jstring packag
const char *package_name = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(package));
if (package_name == NULL) return NULL;
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name, CHECK_NULL);
PackageEntryTable* package_entry_table = module_entry->loader()->packages();
PackageEntryTable* package_entry_table = module_entry->loader_data()->packages();
assert(package_entry_table != NULL, "Unexpected null package entry table");
return package_entry_table->lookup_only(pkg_symbol);
}
@ -820,6 +820,28 @@ jobject Modules::get_module_by_package_name(jobject loader, jstring package, TRA
}
jobject Modules::get_named_module(Handle h_loader, const char* package_str, TRAPS) {
assert(ModuleEntryTable::javabase_defined(),
"Attempt to call get_named_module before java.base is defined");
assert(h_loader.is_null() || java_lang_ClassLoader::is_subclass(h_loader->klass()),
"Class loader is not a subclass of java.lang.ClassLoader");
assert(package_str != NULL, "the package_str should not be NULL");
if (strlen(package_str) == 0) {
return NULL;
}
TempNewSymbol package_sym = SymbolTable::new_symbol(package_str, CHECK_NULL);
const PackageEntry* const pkg_entry =
get_package_entry_by_name(package_sym, h_loader, THREAD);
const ModuleEntry* const module_entry = (pkg_entry != NULL ? pkg_entry->module() : NULL);
if (module_entry != NULL && module_entry->module() != NULL && module_entry->is_named()) {
return JNIHandles::make_local(THREAD, JNIHandles::resolve(module_entry->module()));
}
return NULL;
}
// This method is called by JFR and by the above method.
jobject Modules::get_module(Symbol* package_name, Handle h_loader, TRAPS) {
const PackageEntry* const pkg_entry =
@ -868,7 +890,7 @@ void Modules::add_module_package(jobject module, jstring package, TRAPS) {
package_name, module_entry->name()->as_C_string());
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name, CHECK);
PackageEntryTable* package_table = module_entry->loader()->packages();
PackageEntryTable* package_table = module_entry->loader_data()->packages();
assert(package_table != NULL, "Missing package_table");
bool pkg_exists = false;

@ -121,6 +121,7 @@ public:
// IllegalArgumentException is thrown if loader is neither null nor a subtype of
// java/lang/ClassLoader.
static jobject get_module_by_package_name(jobject loader, jstring package, TRAPS);
static jobject get_named_module(Handle h_loader, const char* package, TRAPS);
// If package is defined by loader, return the
// java.lang.reflect.Module object for the module in which the package is defined.

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
@ -53,12 +54,40 @@ void PackageEntry::add_qexport(ModuleEntry* m) {
if (!has_qual_exports_list()) {
// Lazily create a package's qualified exports list.
// Initial size is small, do not anticipate export lists to be large.
_qualified_exports =
new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, true);
_qualified_exports = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, true);
}
// Determine, based on this newly established export to module m,
// if this package's export list should be walked at a GC safepoint.
set_export_walk_required(m->loader_data());
// Establish exportability to module m
_qualified_exports->append_if_missing(m);
}
// If the loader of the module that an export is being established to is
// neither this module's loader nor one of the 3 builtin class loaders,
// then this package's export list must be walked at a GC safepoint.
// Modules have the same life cycle as their defining class loaders and
// should be removed if dead.
void PackageEntry::set_export_walk_required(ClassLoaderData* m_loader_data) {
assert_locked_or_safepoint(Module_lock);
ModuleEntry* this_pkg_mod = module();
if (!_must_walk_exports &&
(this_pkg_mod == NULL || this_pkg_mod->loader_data() != m_loader_data) &&
!m_loader_data->is_builtin_class_loader_data()) {
_must_walk_exports = true;
if (log_is_enabled(Trace, modules)) {
ResourceMark rm;
assert(name() != NULL, "PackageEntry without a valid name");
log_trace(modules)("PackageEntry::set_export_walk_required(): package %s defined in module %s, exports list must be walked",
name()->as_C_string(),
(this_pkg_mod == NULL || this_pkg_mod->name() == NULL) ?
UNNAMED_MODULE : this_pkg_mod->name()->as_C_string());
}
}
}
// Set the package's exported states based on the value of the ModuleEntry.
void PackageEntry::set_exported(ModuleEntry* m) {
MutexLocker m1(Module_lock);
@ -96,14 +125,34 @@ void PackageEntry::set_is_exported_allUnnamed() {
// Remove dead module entries within the package's exported list.
void PackageEntry::purge_qualified_exports() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (_qualified_exports != NULL) {
if (_must_walk_exports &&
_qualified_exports != NULL &&
!_qualified_exports->is_empty()) {
ModuleEntry* pkg_module = module();
// This package's _must_walk_exports flag will be reset based
// on the remaining live modules on the exports list.
_must_walk_exports = false;
if (log_is_enabled(Trace, modules)) {
ResourceMark rm;
assert(name() != NULL, "PackageEntry without a valid name");
ModuleEntry* pkg_mod = module();
log_trace(modules)("PackageEntry::purge_qualified_exports(): package %s defined in module %s, exports list being walked",
name()->as_C_string(),
(pkg_mod == NULL || pkg_mod->name() == NULL) ? UNNAMED_MODULE : pkg_mod->name()->as_C_string());
}
// Go backwards because this removes entries that are dead.
int len = _qualified_exports->length();
for (int idx = len - 1; idx >= 0; idx--) {
ModuleEntry* module_idx = _qualified_exports->at(idx);
ClassLoaderData* cld = module_idx->loader();
if (cld->is_unloading()) {
ClassLoaderData* cld_idx = module_idx->loader_data();
if (cld_idx->is_unloading()) {
_qualified_exports->delete_at(idx);
} else {
// Update the need to walk this package's exports based on live modules
set_export_walk_required(cld_idx);
}
}
}
@ -297,8 +346,8 @@ void PackageEntryTable::print(outputStream* st) {
void PackageEntry::print(outputStream* st) {
ResourceMark rm;
st->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
st->print_cr("package entry " PTR_FORMAT " name %s module %s classpath_index "
INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next " PTR_FORMAT,
p2i(this), name()->as_C_string(),
(module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
_classpath_index, _is_exported_unqualified, _is_exported_allUnnamed, p2i(next()));

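Both purge loops run from the tail for the reason the "Go backwards" comments give: delete_at() compacts the array, so a forward walk would skip the element shifted into the freed slot, while indices below the current one are unaffected by a removal. A self-contained illustration using std::vector in place of GrowableArray, with parity as a stand-in liveness test:

#include <cassert>
#include <vector>

// Remove all dead entries; walking backwards keeps unvisited indices valid.
static void purge(std::vector<int>& entries) {
  for (int idx = (int)entries.size() - 1; idx >= 0; idx--) {
    bool dead = (entries[idx] % 2 == 0);     // stand-in liveness test
    if (dead) {
      entries.erase(entries.begin() + idx);  // analogous to delete_at(idx)
    }
  }
}

int main() {
  std::vector<int> v = { 1, 2, 4, 5, 6 };
  purge(v);
  assert(v.size() == 2 && v[0] == 1 && v[1] == 5);
  return 0;
}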
@ -69,6 +69,7 @@ private:
s2 _classpath_index;
bool _is_exported_unqualified;
bool _is_exported_allUnnamed;
bool _must_walk_exports;
GrowableArray<ModuleEntry*>* _exported_pending_delete; // transitioned from qualified to unqualified, delete at safepoint
GrowableArray<ModuleEntry*>* _qualified_exports;
TRACE_DEFINE_TRACE_ID_FIELD;
@ -82,6 +83,7 @@ public:
_classpath_index = -1;
_is_exported_unqualified = false;
_is_exported_allUnnamed = false;
_must_walk_exports = false;
_exported_pending_delete = NULL;
_qualified_exports = NULL;
}
@ -147,6 +149,7 @@ public:
// add the module to the package's qualified exports
void add_qexport(ModuleEntry* m);
void set_export_walk_required(ClassLoaderData* m_loader_data);
PackageEntry* next() const {
return (PackageEntry*)HashtableEntry<Symbol*, mtModule>::next();

@ -175,9 +175,18 @@ bool SystemDictionary::is_parallelDefine(Handle class_loader) {
return false;
}
/**
* Returns true if the passed class loader is the platform class loader.
*/
// Returns true if the passed class loader is the builtin application class loader
// or a custom system class loader. A custom system class loader can be
// specified via -Djava.system.class.loader.
bool SystemDictionary::is_system_class_loader(Handle class_loader) {
if (class_loader.is_null()) {
return false;
}
return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
class_loader() == _java_system_loader);
}
// Returns true if the passed class loader is the platform class loader.
bool SystemDictionary::is_platform_class_loader(Handle class_loader) {
if (class_loader.is_null()) {
return false;

@ -660,6 +660,7 @@ public:
static instanceKlassHandle load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS);
static bool is_system_class_loader(Handle class_loader);
static bool is_platform_class_loader(Handle class_loader);
protected:

@ -1256,9 +1256,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set(collection_set()->inc_head());
collection_set()->clear_incremental();
collection_set()->stop_incremental_building();
abandon_collection_set(collection_set());
tear_down_region_sets(false /* free_list_only */);
collector_state()->set_gcs_are_young(true);
@ -1379,7 +1377,6 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
_verifier->check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
assert(collection_set()->head() == NULL, "must be");
collection_set()->start_incremental_building();
clear_cset_fast_test();
@ -1724,8 +1721,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_in_cset_fast_test(),
_worker_cset_start_region(NULL),
_worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
@ -1748,8 +1743,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
uint n_queues = ParallelGCThreads;
_task_queues = new RefToScanQueueSet(n_queues);
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (uint i = 0; i < n_queues; i++) {
@ -1758,7 +1751,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_task_queues->register_queue(i, q);
::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
}
clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags.
NOT_PRODUCT(reset_evacuation_should_fail();)
@ -1987,6 +1979,8 @@ jint G1CollectedHeap::initialize() {
_preserved_marks_set.init(ParallelGCThreads);
_collection_set.initialize(max_regions());
return JNI_OK;
}
@ -2420,117 +2414,12 @@ G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}
// Clear the cached CSet starting regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
void G1CollectedHeap::clear_cset_start_regions() {
assert(_worker_cset_start_region != NULL, "sanity");
assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
for (uint i = 0; i < ParallelGCThreads; i++) {
_worker_cset_start_region[i] = NULL;
_worker_cset_start_region_time_stamp[i] = 0;
}
}
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
assert(get_gc_time_stamp() > 0, "should have been updated by now");
HeapRegion* result = NULL;
unsigned gc_time_stamp = get_gc_time_stamp();
if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
// Cached starting region for current worker was set
// during the current pause - so it's valid.
// Note: the cached starting heap region may be NULL
// (when the collection set is empty).
result = _worker_cset_start_region[worker_i];
assert(result == NULL || result->in_collection_set(), "sanity");
return result;
}
// The cached entry was not valid so let's calculate
// a suitable starting heap region for this worker.
// We want the parallel threads to start their collection
// set iteration at different collection set regions to
// avoid contention.
// If we have:
// n collection set regions
// p threads
// Then thread t will start at region floor ((t * n) / p)
result = collection_set()->head();
uint cs_size = collection_set()->region_length();
uint active_workers = workers()->active_workers();
uint end_ind = (cs_size * worker_i) / active_workers;
uint start_ind = 0;
if (worker_i > 0 &&
_worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
// Previous workers starting region is valid
// so let's iterate from there
start_ind = (cs_size * (worker_i - 1)) / active_workers;
OrderAccess::loadload();
result = _worker_cset_start_region[worker_i - 1];
}
for (uint i = start_ind; i < end_ind; i++) {
result = result->next_in_collection_set();
}
// Note: the calculated starting heap region may be NULL
// (when the collection set is empty).
assert(result == NULL || result->in_collection_set(), "sanity");
assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
"should be updated only once per pause");
_worker_cset_start_region[worker_i] = result;
OrderAccess::storestore();
_worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
return result;
}
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
HeapRegion* r = collection_set()->head();
while (r != NULL) {
HeapRegion* next = r->next_in_collection_set();
if (cl->doHeapRegion(r)) {
cl->incomplete();
return;
}
r = next;
}
_collection_set.iterate(cl);
}
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
HeapRegionClosure *cl) {
if (r == NULL) {
// The CSet is empty so there's nothing to do.
return;
}
assert(r->in_collection_set(),
"Start region must be a member of the collection set.");
HeapRegion* cur = r;
while (cur != NULL) {
HeapRegion* next = cur->next_in_collection_set();
if (cl->doHeapRegion(cur) && false) {
cl->incomplete();
return;
}
cur = next;
}
cur = collection_set()->head();
while (cur != r) {
HeapRegion* next = cur->next_in_collection_set();
if (cl->doHeapRegion(cur) && false) {
cl->incomplete();
return;
}
cur = next;
}
void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
_collection_set.iterate_from(cl, worker_id, workers()->active_workers());
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
@ -3090,6 +2979,18 @@ void G1CollectedHeap::wait_for_root_region_scanning() {
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}
class G1PrintCollectionSetClosure : public HeapRegionClosure {
private:
G1HRPrinter* _hr_printer;
public:
G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
virtual bool doHeapRegion(HeapRegion* r) {
_hr_printer->cset(r);
return false;
}
};
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */);
@ -3268,11 +3169,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
HeapRegion* hr = collection_set()->head();
while (hr != NULL) {
_hr_printer.cset(hr);
hr = hr->next_in_collection_set();
}
G1PrintCollectionSetClosure cl(&_hr_printer);
_collection_set.iterate(&cl);
}
// Initialize the GC alloc regions.
@ -3287,12 +3185,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
post_evacuate_collection_set(evacuation_info, &per_thread_states);
const size_t* surviving_young_words = per_thread_states.surviving_young_words();
free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
eagerly_reclaim_humongous_regions();
collection_set()->clear_head();
record_obj_copy_mem_stats();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
@ -4704,120 +4600,139 @@ void G1CollectedHeap::scrub_rem_set() {
workers()->run_task(&g1_par_scrub_rs_task);
}
void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
size_t pre_used = 0;
FreeRegionList local_free_list("Local List for CSet Freeing");
class G1FreeCollectionSetClosure : public HeapRegionClosure {
private:
const size_t* _surviving_young_words;
double young_time_ms = 0.0;
double non_young_time_ms = 0.0;
FreeRegionList _local_free_list;
size_t _rs_lengths;
// Bytes used in successfully evacuated regions before the evacuation.
size_t _before_used_bytes;
// Bytes used in unsuccessfully evacuated regions before the evacuation.
size_t _after_used_bytes;
_eden.clear();
size_t _bytes_allocated_in_old_since_last_gc;
G1Policy* policy = g1_policy();
size_t _failure_used_words;
size_t _failure_waste_words;
double start_sec = os::elapsedTime();
bool non_young = true;
double _young_time;
double _non_young_time;
public:
G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
HeapRegionClosure(),
_surviving_young_words(surviving_young_words),
_local_free_list("Local Region List for CSet Freeing"),
_rs_lengths(0),
_before_used_bytes(0),
_after_used_bytes(0),
_bytes_allocated_in_old_since_last_gc(0),
_failure_used_words(0),
_failure_waste_words(0),
_young_time(0.0),
_non_young_time(0.0) {
}
HeapRegion* cur = cs_head;
int age_bound = -1;
size_t rs_lengths = 0;
virtual bool doHeapRegion(HeapRegion* r) {
double start_time = os::elapsedTime();
while (cur != NULL) {
assert(!is_on_master_free_list(cur), "sanity");
if (non_young) {
if (cur->is_young()) {
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
non_young_time_ms += elapsed_ms;
bool is_young = r->is_young();
start_sec = os::elapsedTime();
non_young = false;
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(!g1h->is_on_master_free_list(r), "sanity");
_rs_lengths += r->rem_set()->occupied_locked();
assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
g1h->clear_in_cset(r);
if (is_young) {
int index = r->young_index_in_cset();
assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index];
r->record_surv_words_in_group(words_survived);
} else {
if (!cur->is_young()) {
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
young_time_ms += elapsed_ms;
start_sec = os::elapsedTime();
non_young = true;
}
assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
}
rs_lengths += cur->rem_set()->occupied_locked();
HeapRegion* next = cur->next_in_collection_set();
assert(cur->in_collection_set(), "bad CS");
cur->set_next_in_collection_set(NULL);
clear_in_cset(cur);
if (cur->is_young()) {
int index = cur->young_index_in_cset();
assert(index != -1, "invariant");
assert((uint) index < collection_set()->young_region_length(), "invariant");
size_t words_survived = surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);
if (!r->evacuation_failed()) {
assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
_before_used_bytes += r->used();
g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
} else {
int index = cur->young_index_in_cset();
assert(index == -1, "invariant");
}
assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
(!cur->is_young() && cur->young_index_in_cset() == -1),
"invariant" );
if (!cur->evacuation_failed()) {
MemRegion used_mr = cur->used_region();
// And the region is empty.
assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
pre_used += cur->used();
free_region(cur, &local_free_list, false /* par */, true /* locked */);
} else {
cur->uninstall_surv_rate_group();
if (cur->is_young()) {
cur->set_young_index_in_cset(-1);
}
cur->set_evacuation_failed(false);
r->uninstall_surv_rate_group();
r->set_young_index_in_cset(-1);
r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
if (cur->is_young()) {
policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
if (is_young) {
_bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
// The region is now considered to be old.
cur->set_old();
r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
size_t used_words = cur->marked_bytes() / HeapWordSize;
_old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
_old_set.add(cur);
evacuation_info.increment_collectionset_used_after(cur->used());
size_t used_words = r->marked_bytes() / HeapWordSize;
_failure_used_words += used_words;
_failure_waste_words += HeapRegion::GrainWords - used_words;
g1h->old_set_add(r);
_after_used_bytes += r->used();
}
cur = next;
if (is_young) {
_young_time += os::elapsedTime() - start_time;
} else {
_non_young_time += os::elapsedTime() - start_time;
}
return false;
}
evacuation_info.set_regions_freed(local_free_list.length());
policy->record_max_rs_lengths(rs_lengths);
FreeRegionList* local_free_list() { return &_local_free_list; }
size_t rs_lengths() const { return _rs_lengths; }
size_t before_used_bytes() const { return _before_used_bytes; }
size_t after_used_bytes() const { return _after_used_bytes; }
size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
size_t failure_used_words() const { return _failure_used_words; }
size_t failure_waste_words() const { return _failure_waste_words; }
double young_time() const { return _young_time; }
double non_young_time() const { return _non_young_time; }
};
void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
_eden.clear();
G1FreeCollectionSetClosure cl(surviving_young_words);
collection_set_iterate(&cl);
evacuation_info.set_regions_freed(cl.local_free_list()->length());
evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
G1Policy* policy = g1_policy();
policy->record_max_rs_lengths(cl.rs_lengths());
policy->cset_regions_freed();
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
prepend_to_freelist(cl.local_free_list());
decrement_summary_bytes(cl.before_used_bytes());
if (non_young) {
non_young_time_ms += elapsed_ms;
} else {
young_time_ms += elapsed_ms;
}
policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
prepend_to_freelist(&local_free_list);
decrement_summary_bytes(pre_used);
policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
_old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
collection_set->clear();
}
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@ -4960,25 +4875,22 @@ void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
cl.humongous_free_count());
}
// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation of a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.
void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
HeapRegion* cur = cs_head;
while (cur != NULL) {
HeapRegion* next = cur->next_in_collection_set();
assert(cur->in_collection_set(), "bad CS");
cur->set_next_in_collection_set(NULL);
clear_in_cset(cur);
cur->set_young_index_in_cset(-1);
cur = next;
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
virtual bool doHeapRegion(HeapRegion* r) {
assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
G1CollectedHeap::heap()->clear_in_cset(r);
r->set_young_index_in_cset(-1);
return false;
}
};
void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
G1AbandonCollectionSetClosure cl;
collection_set->iterate(&cl);
collection_set->clear();
collection_set->stop_incremental_building();
}
void G1CollectedHeap::set_free_regions_coming() {

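The failure accounting in G1FreeCollectionSetClosure above deserves a worked example: a region that failed evacuation keeps only its marked bytes as live data, so marked_bytes / HeapWordSize words are charged as used and the rest of the fixed region size as waste. A sketch with assumed constants (8-byte heap words, 1 MB regions; the real values come from HeapWordSize and HeapRegion::GrainWords):

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed stand-ins for HeapWordSize and HeapRegion::GrainWords.
  const size_t heap_word_size = 8;                              // bytes per word
  const size_t grain_words    = (1024 * 1024) / heap_word_size; // 1 MB region

  size_t marked_bytes = 300 * 1024;   // live data left behind by the failure
  size_t used_words   = marked_bytes / heap_word_size;
  size_t waste_words  = grain_words - used_words;

  // These per-region totals feed add_failure_used_and_waste().
  std::printf("used=%zu words, waste=%zu words\n", used_words, waste_words);
  return 0;
}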
@ -778,13 +778,13 @@ protected:
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;
// After a collection pause, make the regions in the CS into free
// After a collection pause, convert the regions in the collection set into free
// regions.
void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
// Abandon the current collection set without recording policy
// statistics or updating free lists.
void abandon_collection_set(HeapRegion* cs_head);
void abandon_collection_set(G1CollectionSet* collection_set);
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
@ -930,16 +930,6 @@ protected:
// discovery.
G1CMIsAliveClosure _is_alive_closure_cm;
// Cache used by G1CollectedHeap::start_cset_region_for_worker().
HeapRegion** _worker_cset_start_region;
// Time stamp to validate the regions recorded in the cache
// used by G1CollectedHeap::start_cset_region_for_worker().
// The heap region entry for a given worker is valid iff
// the associated time stamp value matches the current value
// of G1CollectedHeap::_gc_time_stamp.
uint* _worker_cset_start_region_time_stamp;
volatile bool _free_regions_coming;
public:
@ -1211,19 +1201,14 @@ public:
HeapRegionClaimer* hrclaimer,
bool concurrent = false) const;
// Clear the cached cset start regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
void clear_cset_start_regions();
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
HeapRegion* start_cset_region_for_worker(uint worker_i);
// Iterate over the regions (if any) in the current collection set.
void collection_set_iterate(HeapRegionClosure* blk);
// As above but starting from region r
void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
// Iterate over the regions (if any) in the current collection set. The start
// positions of the active workers are spread evenly across the collection set
// regions, based on the caller's worker_id.
void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
HeapRegion* next_compaction_region(const HeapRegion* from) const;

@ -89,16 +89,13 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
}
inline void G1CollectedHeap::reset_gc_time_stamp() {
assert_at_safepoint(true);
_gc_time_stamp = 0;
OrderAccess::fence();
// Clear the cached CSet starting regions and time stamps.
// Their validity is dependent on the GC timestamp.
clear_cset_start_regions();
}
inline void G1CollectedHeap::increment_gc_time_stamp() {
assert_at_safepoint(true);
++_gc_time_stamp;
OrderAccess::fence();
}
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {

@ -30,6 +30,7 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "logging/logStream.hpp"
#include "utilities/debug.hpp"
G1CollectorState* G1CollectionSet::collector_state() {
@ -55,48 +56,63 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
_eden_region_length(0),
_survivor_region_length(0),
_old_region_length(0),
_head(NULL),
_bytes_used_before(0),
_recorded_rs_lengths(0),
_collection_set_regions(NULL),
_collection_set_cur_length(0),
_collection_set_max_length(0),
// Incremental CSet attributes
_inc_build_state(Inactive),
_inc_head(NULL),
_inc_tail(NULL),
_inc_bytes_used_before(0),
_inc_recorded_rs_lengths(0),
_inc_recorded_rs_lengths_diffs(0),
_inc_predicted_elapsed_time_ms(0.0),
_inc_predicted_elapsed_time_ms_diffs(0.0),
_inc_region_length(0) {}
_inc_predicted_elapsed_time_ms_diffs(0.0) {
}
G1CollectionSet::~G1CollectionSet() {
if (_collection_set_regions != NULL) {
FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
}
delete _cset_chooser;
}
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length) {
assert_at_safepoint(true);
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
assert(young_region_length() == _inc_region_length, "should match %u == %u", young_region_length(), _inc_region_length);
assert((size_t) young_region_length() == _collection_set_cur_length,
"Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
_old_region_length = 0;
}
void G1CollectionSet::initialize(uint max_region_length) {
guarantee(_collection_set_regions == NULL, "Must only initialize once.");
_collection_set_max_length = max_region_length;
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}
void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
_recorded_rs_lengths = rs_lengths;
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert_at_safepoint(true);
assert(_inc_build_state == Active, "Precondition");
assert(hr->is_old(), "the region should be old");
assert(!hr->in_collection_set(), "should not already be in the CSet");
_g1->register_old_region_with_cset(hr);
hr->set_next_in_collection_set(_head);
_head = hr;
_collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
_bytes_used_before += hr->used();
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
@ -105,12 +121,10 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(_inc_build_state == Inactive, "Precondition");
_inc_head = NULL;
_inc_tail = NULL;
_inc_bytes_used_before = 0;
_inc_region_length = 0;
_inc_recorded_rs_lengths = 0;
_inc_recorded_rs_lengths_diffs = 0;
@ -151,6 +165,38 @@ void G1CollectionSet::finalize_incremental_building() {
_inc_predicted_elapsed_time_ms_diffs = 0.0;
}
void G1CollectionSet::clear() {
assert_at_safepoint(true);
_collection_set_cur_length = 0;
}
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
iterate_from(cl, 0, 1);
}
void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
size_t len = _collection_set_cur_length;
OrderAccess::loadload();
if (len == 0) {
return;
}
size_t start_pos = (worker_id * len) / total_workers;
size_t cur_pos = start_pos;
do {
HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
bool result = cl->doHeapRegion(r);
if (result) {
cl->incomplete();
return;
}
cur_pos++;
if (cur_pos == len) {
cur_pos = 0;
}
} while (cur_pos != start_pos);
}
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
size_t new_rs_length) {
// Update the CSet information that is dependent on the new RS length
@ -183,8 +229,16 @@ void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
assert(hr->is_young(), "invariant");
assert(_inc_build_state == Active, "Precondition");
hr->set_young_index_in_cset(_inc_region_length);
_inc_region_length++;
size_t collection_set_length = _collection_set_cur_length;
assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
hr->set_young_index_in_cset((int)collection_set_length);
_collection_set_regions[collection_set_length] = hr->hrm_index();
// Concurrent readers must observe the store of the value in the array before an
// update to the length field.
OrderAccess::storestore();
_collection_set_cur_length++;
assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
@ -218,59 +272,81 @@ void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
assert(!hr->in_collection_set(), "invariant");
_g1->register_young_region_with_cset(hr);
assert(hr->next_in_collection_set() == NULL, "invariant");
}
// Add the region at the RHS of the incremental cset
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
// We should only ever be appending survivors at the end of a pause
assert(hr->is_survivor(), "Logic");
// Do the 'common' stuff
assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
// Now add the region at the right hand side
if (_inc_tail == NULL) {
assert(_inc_head == NULL, "invariant");
_inc_head = hr;
} else {
_inc_tail->set_next_in_collection_set(hr);
}
_inc_tail = hr;
}
// Add the region to the LHS of the incremental cset
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
// Survivors should be added to the RHS at the end of a pause
assert(hr->is_eden(), "Logic");
// Do the 'common' stuff
assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
// Add the region at the left hand side
hr->set_next_in_collection_set(_inc_head);
if (_inc_head == NULL) {
assert(_inc_tail == NULL, "Invariant");
_inc_tail = hr;
}
_inc_head = hr;
}
#ifndef PRODUCT
void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
assert(list_head == inc_head() || list_head == head(), "must be");
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
bool _valid;
G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
st->print_cr("\nCollection_set:");
HeapRegion* csr = list_head;
while (csr != NULL) {
HeapRegion* next = csr->next_in_collection_set();
assert(csr->in_collection_set(), "bad CS");
st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
HR_FORMAT_PARAMS(csr),
p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
csr->age_in_surv_rate_group_cond());
csr = next;
virtual bool doHeapRegion(HeapRegion* r) {
guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
SurvRateGroup* group = r->surv_rate_group();
if (group == NULL) {
log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
_valid = false;
}
if (r->age_in_surv_rate_group() < 0) {
log_error(gc, verify)("## encountered negative age in young region");
_valid = false;
}
return false;
}
bool valid() const { return _valid; }
};
bool G1CollectionSet::verify_young_ages() {
assert_at_safepoint(true);
G1VerifyYoungAgesClosure cl;
iterate(&cl);
if (!cl.valid()) {
LogStreamHandle(Error, gc, verify) log;
print(&log);
}
return cl.valid();
}
class G1PrintCollectionSetClosure : public HeapRegionClosure {
outputStream* _st;
public:
G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
virtual bool doHeapRegion(HeapRegion* r) {
assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
_st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
HR_FORMAT_PARAMS(r),
p2i(r->prev_top_at_mark_start()),
p2i(r->next_top_at_mark_start()),
r->age_in_surv_rate_group_cond());
return false;
}
};
void G1CollectionSet::print(outputStream* st) {
st->print_cr("\nCollection_set:");
G1PrintCollectionSetClosure cl(st);
iterate(&cl);
}
#endif // !PRODUCT
@ -281,7 +357,6 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
guarantee(_head == NULL, "Precondition");
size_t pending_cards = _policy->pending_cards();
double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
@ -305,7 +380,6 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
// Clear the fields that point to the survivor list - they are all young now.
survivors->convert_to_eden();
_head = _inc_head;
_bytes_used_before = _inc_bytes_used_before;
time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);
@ -422,23 +496,41 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
}
#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
size_t _young_length;
int* _heap_region_indices;
public:
G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
_heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
for (size_t i = 0; i < young_length; i++) {
_heap_region_indices[i] = -1;
}
}
~G1VerifyYoungCSetIndicesClosure() {
FREE_C_HEAP_ARRAY(int, _heap_region_indices);
}
virtual bool doHeapRegion(HeapRegion* r) {
const int idx = r->young_index_in_cset();
assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());
assert(_heap_region_indices[idx] == -1,
"Index %d used by multiple regions, first use by region %u, second by region %u",
idx, _heap_region_indices[idx], r->hrm_index());
_heap_region_indices[idx] = r->hrm_index();
return false;
}
};
void G1CollectionSet::verify_young_cset_indices() const {
ResourceMark rm;
uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
for (uint i = 0; i < young_region_length(); ++i) {
heap_region_indices[i] = (uint)-1;
}
assert_at_safepoint(true);
for (HeapRegion* hr = _inc_head; hr != NULL; hr = hr->next_in_collection_set()) {
const int idx = hr->young_index_in_cset();
assert(idx > -1, "must be set for all inc cset regions");
assert((uint)idx < young_region_length(), "young cset index too large");
assert(heap_region_indices[idx] == (uint)-1,
"index %d used by multiple regions, first use by %u, second by %u",
idx, heap_region_indices[idx], hr->hrm_index());
heap_region_indices[idx] = hr->hrm_index();
}
G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
iterate(&cl);
}
#endif

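iterate_from() above replaces the old linked-list walk with index arithmetic: worker w of p starts at floor(w * len / p) and wraps around the array, so workers begin at evenly spaced positions over the same region array, which is what the removed start_cset_region_for_worker() cache used to approximate. A standalone sketch of the start-position math and the wrap-around loop:

#include <cstddef>
#include <cstdio>
#include <vector>

// Visit every element exactly once, starting at a worker-specific offset.
static void iterate_from(const std::vector<int>& regions,
                         unsigned worker_id, unsigned total_workers) {
  size_t len = regions.size();
  if (len == 0) {
    return;
  }
  size_t start_pos = ((size_t)worker_id * len) / total_workers;
  size_t cur_pos = start_pos;
  do {
    std::printf("worker %u visits region %d\n", worker_id, regions[cur_pos]);
    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;   // wrap to the front of the array
    }
  } while (cur_pos != start_pos);
}

int main() {
  std::vector<int> cset = { 10, 11, 12, 13, 14, 15, 16 };
  for (unsigned w = 0; w < 3; w++) {
    iterate_from(cset, w, 3);   // workers start at indices 0, 2, and 4
  }
  return 0;
}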
@ -47,10 +47,15 @@ class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
uint _survivor_region_length;
uint _old_region_length;
// The head of the list (via "next_in_collection_set()") representing the
// current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _head;
// The actual collection set as a set of region indices.
// All entries in _collection_set_regions below _collection_set_cur_length are
// assumed to be valid entries.
// We assume that at any time there is at most one writer and (one or more)
// concurrent readers. This means we are good with using storestore and loadload
// barriers on the writer and reader respectively only.
uint* _collection_set_regions;
volatile size_t _collection_set_cur_length;
size_t _collection_set_max_length;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
@ -71,12 +76,6 @@ class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
CSetBuildType _inc_build_state;
// The head of the incrementally built collection set.
HeapRegion* _inc_head;
// The tail of the incrementally built collection set.
HeapRegion* _inc_tail;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
@ -105,8 +104,6 @@ class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
// See the comment for _inc_recorded_rs_lengths_diffs.
double _inc_predicted_elapsed_time_ms_diffs;
uint _inc_region_length;
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times();
@ -117,6 +114,9 @@ public:
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
~G1CollectionSet();
// Initializes the collection set giving the maximum possible length of the collection set.
void initialize(uint max_region_length);
CollectionSetChooser* cset_chooser();
void init_region_lengths(uint eden_cset_region_length,
@ -133,36 +133,31 @@ public:
uint survivor_region_length() const { return _survivor_region_length; }
uint old_region_length() const { return _old_region_length; }
// Incremental CSet Support
// The head of the incrementally built collection set.
HeapRegion* inc_head() { return _inc_head; }
// The tail of the incrementally built collection set.
HeapRegion* inc_tail() { return _inc_tail; }
// Incremental collection set support
// Initialize incremental collection set info.
void start_incremental_building();
// Perform any final calculations on the incremental CSet fields
// Perform any final calculations on the incremental collection set fields
// before we can use them.
void finalize_incremental_building();
void clear_incremental() {
_inc_head = NULL;
_inc_tail = NULL;
_inc_region_length = 0;
}
// Reset the contents of the collection set.
void clear();
// Stop adding regions to the incremental collection set
// Iterate over the collection set, applying the given HeapRegionClosure on all of them.
// Iteration is aborted early if the called closure method returns true.
void iterate(HeapRegionClosure* cl) const;
// Iterate over the collection set, applying the given HeapRegionClosure on all of them,
// trying to optimally spread out the starting positions of total_workers workers given the
// caller's worker_id.
void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
// Stop adding regions to the incremental collection set.
void stop_incremental_building() { _inc_build_state = Inactive; }
// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
HeapRegion* head() { return _head; }
void clear_head() { _head = NULL; }
size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
size_t bytes_used_before() const {
@ -174,33 +169,32 @@ public:
}
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
// "in_collection_set".
double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
void finalize_old_part(double time_remaining_ms);
// Add old region "hr" to the CSet.
// Add old region "hr" to the collection set.
void add_old_region(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
// Add hr to the LHS of the incremental collection set.
// Add eden region to the collection set.
void add_eden_region(HeapRegion* hr);
// Add hr to the RHS of the incremental collection set.
// Add survivor region to the collection set.
void add_survivor_regions(HeapRegion* hr);
#ifndef PRODUCT
void print(HeapRegion* list_head, outputStream* st);
bool verify_young_ages();
void print(outputStream* st);
#endif // !PRODUCT
private:
// Update the incremental cset information when adding a region
// (should not be called directly).
// Update the incremental collection set information when adding a region.
void add_young_region_common(HeapRegion* hr);
};
#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP

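The storestore/loadload pairing described in the header above is the classic single-writer publish protocol: the writer stores the array slot, fences, then bumps the length, so any reader that observes the new length also observes every slot it covers. A portable approximation with C++11 fences, using plain arrays as stand-ins for the region-index buffer (HotSpot itself uses OrderAccess rather than <atomic>):

#include <atomic>
#include <cstddef>

static unsigned regions[1024];               // stand-in for _collection_set_regions
static std::atomic<size_t> cur_length(0);    // stand-in for _collection_set_cur_length

// Single writer: make the slot visible before the new length is.
void add_region(unsigned region_index) {
  size_t len = cur_length.load(std::memory_order_relaxed);
  regions[len] = region_index;
  std::atomic_thread_fence(std::memory_order_release);  // ~OrderAccess::storestore()
  cur_length.store(len + 1, std::memory_order_relaxed);
}

// Concurrent readers: read the length, fence, then trust entries below it.
size_t snapshot(unsigned* out) {
  size_t len = cur_length.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // ~OrderAccess::loadload()
  for (size_t i = 0; i < len; i++) {
    out[i] = regions[i];
  }
  return len;
}

int main() {
  add_region(42);
  unsigned buf[1024];
  return (int)snapshot(buf) == 1 ? 0 : 1;
}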
@ -394,37 +394,6 @@ void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
}
}
#ifndef PRODUCT
bool G1DefaultPolicy::verify_young_ages() {
bool ret = true;
for (HeapRegion* curr = _collection_set->inc_head();
curr != NULL;
curr = curr->next_in_collection_set()) {
guarantee(curr->is_young(), "Region must be young");
SurvRateGroup* group = curr->surv_rate_group();
if (group == NULL) {
log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
ret = false;
}
if (curr->age_in_surv_rate_group() < 0) {
log_error(gc, verify)("## encountered negative age in young region");
ret = false;
}
}
if (!ret) {
LogStreamHandle(Error, gc, verify) log;
_collection_set->print(_collection_set->inc_head(), &log);
}
return ret;
}
#endif // PRODUCT
void G1DefaultPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
// Release the future to-space so that it is available for compaction into.
@ -488,7 +457,7 @@ void G1DefaultPolicy::record_collection_pause_start(double start_time_sec) {
_short_lived_surv_rate_group->stop_adding_regions();
_survivors_age_table.clear();
assert( verify_young_ages(), "region age verification" );
assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
}
void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {

@ -89,10 +89,6 @@ class G1DefaultPolicy: public G1Policy {
size_t _rs_lengths_prediction;
#ifndef PRODUCT
bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT
size_t _pending_cards;
// The amount of allocated bytes in old gen during the last mutator and the following
@ -116,10 +112,6 @@ public:
hr->install_surv_rate_group(_survivor_surv_rate_group);
}
#ifndef PRODUCT
bool verify_young_ages();
#endif // PRODUCT
void record_max_rs_lengths(size_t rs_lengths) {
_max_rs_lengths = rs_lengths;
}

@ -251,6 +251,5 @@ G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &rsfp_cl);
_g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
}

@ -580,15 +580,20 @@ void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
}
}
void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->next_in_collection_set()) {
verify_dirty_region(hr);
class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
private:
G1HeapVerifier* _verifier;
public:
G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
virtual bool doHeapRegion(HeapRegion* r) {
_verifier->verify_dirty_region(r);
return false;
}
}
};
void G1HeapVerifier::verify_dirty_young_regions() {
verify_dirty_young_list(_g1h->collection_set()->inc_head());
G1VerifyDirtyYoungListClosure cl(this);
_g1h->collection_set()->iterate(&cl);
}
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,

@ -108,7 +108,6 @@ public:
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
};

@ -382,10 +382,8 @@ size_t G1RemSet::scan_rem_set(G1ParPushHeapRSClosure* oops_in_heap_closure,
uint worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
_g1->collection_set_iterate_from(startRegion, &cl);
_g1->collection_set_iterate_from(&cl, worker_i);
double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
cl.strong_code_root_scan_time_sec();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,8 +154,8 @@ void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* c
}
void G1StringDedupQueue::print_statistics() {
log_debug(gc, stringdedup)(" [Queue]");
log_debug(gc, stringdedup)(" [Dropped: " UINTX_FORMAT "]", _queue->_dropped);
log_debug(gc, stringdedup)(" Queue");
log_debug(gc, stringdedup)(" Dropped: " UINTX_FORMAT, _queue->_dropped);
}
void G1StringDedupQueue::verify() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,9 @@ G1StringDedupStat::G1StringDedupStat() :
_idle(0),
_exec(0),
_block(0),
_start(0.0),
_start_concurrent(0.0),
_end_concurrent(0.0),
_start_phase(0.0),
_idle_elapsed(0.0),
_exec_elapsed(0.0),
_block_elapsed(0.0) {
@ -69,7 +71,13 @@ void G1StringDedupStat::add(const G1StringDedupStat& stat) {
_block_elapsed += stat._block_elapsed;
}
void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
void G1StringDedupStat::print_start(const G1StringDedupStat& last_stat) {
log_info(gc, stringdedup)(
"Concurrent String Deduplication (" G1_STRDEDUP_TIME_FORMAT ")",
G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent));
}
void G1StringDedupStat::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
double total_deduped_bytes_percent = 0.0;
if (total_stat._new_bytes > 0) {
@ -79,13 +87,16 @@ void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const
log_info(gc, stringdedup)(
"Concurrent String Deduplication "
G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS "), avg "
G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT,
G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS ") "
"avg " G1_STRDEDUP_PERCENT_FORMAT_NS " "
"(" G1_STRDEDUP_TIME_FORMAT ", " G1_STRDEDUP_TIME_FORMAT ") " G1_STRDEDUP_TIME_FORMAT_MS,
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),
total_deduped_bytes_percent,
last_stat._exec_elapsed);
G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent),
G1_STRDEDUP_TIME_PARAM(last_stat._end_concurrent),
G1_STRDEDUP_TIME_PARAM_MS(last_stat._exec_elapsed));
}
void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
@ -134,23 +145,31 @@ void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool tot
if (total) {
log_debug(gc, stringdedup)(
" [Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
stat._exec, stat._exec_elapsed, stat._idle, stat._idle_elapsed, stat._block, stat._block_elapsed);
" Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
stat._exec, G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
stat._idle, G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
} else {
log_debug(gc, stringdedup)(
" [Last Exec: " G1_STRDEDUP_TIME_FORMAT ", Idle: " G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
stat._exec_elapsed, stat._idle_elapsed, stat._block, stat._block_elapsed);
" Last Exec: " G1_STRDEDUP_TIME_FORMAT_MS
", Idle: " G1_STRDEDUP_TIME_FORMAT_MS
", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
}
log_debug(gc, stringdedup)(" [Inspected: " G1_STRDEDUP_OBJECTS_FORMAT "]", stat._inspected);
log_debug(gc, stringdedup)(" [Skipped: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._skipped, skipped_percent);
log_debug(gc, stringdedup)(" [Hashed: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._hashed, hashed_percent);
log_debug(gc, stringdedup)(" [Known: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._known, known_percent);
log_debug(gc, stringdedup)(" [New: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "]",
log_debug(gc, stringdedup)(" Inspected: " G1_STRDEDUP_OBJECTS_FORMAT, stat._inspected);
log_debug(gc, stringdedup)(" Skipped: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._skipped, skipped_percent);
log_debug(gc, stringdedup)(" Hashed: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._hashed, hashed_percent);
log_debug(gc, stringdedup)(" Known: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._known, known_percent);
log_debug(gc, stringdedup)(" New: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT,
stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes));
log_debug(gc, stringdedup)(" [Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
log_debug(gc, stringdedup)(" Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent);
log_debug(gc, stringdedup)(" [Young: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
log_debug(gc, stringdedup)(" Young: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent);
log_debug(gc, stringdedup)(" [Old: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
log_debug(gc, stringdedup)(" Old: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,11 +30,14 @@
// Macros for GC log output formatting
#define G1_STRDEDUP_OBJECTS_FORMAT UINTX_FORMAT_W(12)
#define G1_STRDEDUP_TIME_FORMAT "%1.7lf secs"
#define G1_STRDEDUP_PERCENT_FORMAT "%5.1lf%%"
#define G1_STRDEDUP_PERCENT_FORMAT_NS "%.1lf%%"
#define G1_STRDEDUP_BYTES_FORMAT "%8.1lf%s"
#define G1_STRDEDUP_BYTES_FORMAT_NS "%.1lf%s"
#define G1_STRDEDUP_TIME_FORMAT "%.3fs"
#define G1_STRDEDUP_TIME_PARAM(time) (time)
#define G1_STRDEDUP_TIME_FORMAT_MS "%.3fms"
#define G1_STRDEDUP_TIME_PARAM_MS(time) ((time) * MILLIUNITS)
#define G1_STRDEDUP_PERCENT_FORMAT "%5.1f%%"
#define G1_STRDEDUP_PERCENT_FORMAT_NS "%.1f%%"
#define G1_STRDEDUP_BYTES_FORMAT "%8.1f%s"
#define G1_STRDEDUP_BYTES_FORMAT_NS "%.1f%s"
#define G1_STRDEDUP_BYTES_PARAM(bytes) byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
//
@ -60,7 +63,9 @@ private:
uintx _block;
// Time spent by the deduplication thread in different phases
double _start;
double _start_concurrent;
double _end_concurrent;
double _start_phase;
double _idle_elapsed;
double _exec_elapsed;
double _block_elapsed;
@ -104,38 +109,41 @@ public:
}
void mark_idle() {
_start = os::elapsedTime();
_start_phase = os::elapsedTime();
_idle++;
}
void mark_exec() {
double now = os::elapsedTime();
_idle_elapsed = now - _start;
_start = now;
_idle_elapsed = now - _start_phase;
_start_phase = now;
_start_concurrent = now;
_exec++;
}
void mark_block() {
double now = os::elapsedTime();
_exec_elapsed += now - _start;
_start = now;
_exec_elapsed += now - _start_phase;
_start_phase = now;
_block++;
}
void mark_unblock() {
double now = os::elapsedTime();
_block_elapsed += now - _start;
_start = now;
_block_elapsed += now - _start_phase;
_start_phase = now;
}
void mark_done() {
double now = os::elapsedTime();
_exec_elapsed += now - _start;
_exec_elapsed += now - _start_phase;
_end_concurrent = now;
}
void add(const G1StringDedupStat& stat);
static void print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
static void print_start(const G1StringDedupStat& last_stat);
static void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
static void print_statistics(const G1StringDedupStat& stat, bool total);
};

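A standalone sketch of the timing pattern in the mark_* methods above, assuming only std::chrono in place of os::elapsedTime(): a single _start_phase timestamp closes one phase and opens the next at every transition.

#include <chrono>

// Stand-in for os::elapsedTime(): seconds since a fixed origin.
static double elapsed_seconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

// One reusable _start_phase timestamp: each mark_* call closes the
// previous phase and opens the next without extra bookkeeping.
struct PhaseTimer {
  double _start_phase = 0.0, _idle = 0.0, _exec = 0.0;
  void mark_idle() { _start_phase = elapsed_seconds(); }
  void mark_exec() {
    double now = elapsed_seconds();
    _idle += now - _start_phase;   // close the idle phase
    _start_phase = now;            // open the exec phase
  }
  void mark_done() { _exec += elapsed_seconds() - _start_phase; }
};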
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,16 +37,16 @@
#include "runtime/mutexLocker.hpp"
//
// Freelist in the deduplication table entry cache. Links table
// List of deduplication table entries. Links table
// entries together using their _next fields.
//
class G1StringDedupEntryFreeList : public CHeapObj<mtGC> {
class G1StringDedupEntryList : public CHeapObj<mtGC> {
private:
G1StringDedupEntry* _list;
size_t _length;
public:
G1StringDedupEntryFreeList() :
G1StringDedupEntryList() :
_list(NULL),
_length(0) {
}
@ -66,6 +66,12 @@ public:
return entry;
}
G1StringDedupEntry* remove_all() {
G1StringDedupEntry* list = _list;
_list = NULL;
return list;
}
size_t length() {
return _length;
}
@ -87,43 +93,53 @@ public:
//
class G1StringDedupEntryCache : public CHeapObj<mtGC> {
private:
// One freelist per GC worker to allow lock less freeing of
// entries while doing a parallel scan of the table. Using
// PaddedEnd to avoid false sharing.
PaddedEnd<G1StringDedupEntryFreeList>* _lists;
size_t _nlists;
// One cache/overflow list per GC worker to allow lock less freeing of
// entries while doing a parallel scan of the table. Using PaddedEnd to
// avoid false sharing.
size_t _nlists;
size_t _max_list_length;
PaddedEnd<G1StringDedupEntryList>* _cached;
PaddedEnd<G1StringDedupEntryList>* _overflowed;
public:
G1StringDedupEntryCache();
G1StringDedupEntryCache(size_t max_size);
~G1StringDedupEntryCache();
// Get a table entry from the cache freelist, or allocate a new
// entry if the cache is empty.
// Set max number of table entries to cache.
void set_max_size(size_t max_size);
// Get a table entry from the cache, or allocate a new entry if the cache is empty.
G1StringDedupEntry* alloc();
// Insert a table entry into the cache freelist.
// Insert a table entry into the cache.
void free(G1StringDedupEntry* entry, uint worker_id);
// Returns current number of entries in the cache.
size_t size();
// If the cache has grown above the given max size, trim it down
// and deallocate the memory occupied by the trimmed-off entries.
void trim(size_t max_size);
// Deletes overflowed entries.
void delete_overflowed();
};
G1StringDedupEntryCache::G1StringDedupEntryCache() {
_nlists = ParallelGCThreads;
_lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
_nlists(ParallelGCThreads),
_max_list_length(0),
_cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
_overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
set_max_size(max_size);
}
G1StringDedupEntryCache::~G1StringDedupEntryCache() {
ShouldNotReachHere();
}
void G1StringDedupEntryCache::set_max_size(size_t size) {
_max_list_length = size / _nlists;
}
G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
for (size_t i = 0; i < _nlists; i++) {
G1StringDedupEntry* entry = _lists[i].remove();
G1StringDedupEntry* entry = _cached[i].remove();
if (entry != NULL) {
return entry;
}
@ -134,31 +150,54 @@ G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
assert(entry->obj() != NULL, "Double free");
assert(worker_id < _nlists, "Invalid worker id");
entry->set_obj(NULL);
entry->set_hash(0);
_lists[worker_id].add(entry);
if (_cached[worker_id].length() < _max_list_length) {
// Cache is not full
_cached[worker_id].add(entry);
} else {
// Cache is full, add to overflow list for later deletion
_overflowed[worker_id].add(entry);
}
}
size_t G1StringDedupEntryCache::size() {
size_t size = 0;
for (size_t i = 0; i < _nlists; i++) {
size += _lists[i].length();
size += _cached[i].length();
}
return size;
}
void G1StringDedupEntryCache::trim(size_t max_size) {
size_t cache_size = 0;
void G1StringDedupEntryCache::delete_overflowed() {
double start = os::elapsedTime();
uintx count = 0;
for (size_t i = 0; i < _nlists; i++) {
G1StringDedupEntryFreeList* list = &_lists[i];
cache_size += list->length();
while (cache_size > max_size) {
G1StringDedupEntry* entry = list->remove();
assert(entry != NULL, "Should not be null");
cache_size--;
G1StringDedupEntry* entry;
{
// The overflow list can be modified during safepoints, therefore
// we temporarily join the suspendible thread set while removing
// all entries from the list.
SuspendibleThreadSetJoiner sts_join;
entry = _overflowed[i].remove_all();
}
// Delete all entries
while (entry != NULL) {
G1StringDedupEntry* next = entry->next();
delete entry;
entry = next;
count++;
}
}
double end = os::elapsedTime();
log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT_MS,
count, G1_STRDEDUP_TIME_PARAM_MS(end - start));
}
G1StringDedupTable* G1StringDedupTable::_table = NULL;
@ -195,7 +234,7 @@ G1StringDedupTable::~G1StringDedupTable() {
void G1StringDedupTable::create() {
assert(_table == NULL, "One string deduplication table allowed");
_entry_cache = new G1StringDedupEntryCache();
_entry_cache = new G1StringDedupEntryCache(_min_size * _max_cache_factor);
_table = new G1StringDedupTable(_min_size);
}
@ -389,6 +428,9 @@ G1StringDedupTable* G1StringDedupTable::prepare_resize() {
// Update statistics
_resize_count++;
// Update max cache size
_entry_cache->set_max_size(size * _max_cache_factor);
// Allocate the new table. The new table will be populated by workers
// calling unlink_or_oops_do() and finally installed by finish_resize().
return new G1StringDedupTable(size, _table->_hash_seed);
@ -441,7 +483,7 @@ void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* c
removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
}
// Delayed update avoid contention on the table lock
// Delayed update to avoid contention on the table lock
if (removed > 0) {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
_table->_entries -= removed;
@ -563,22 +605,20 @@ void G1StringDedupTable::verify() {
}
}
void G1StringDedupTable::trim_entry_cache() {
MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
size_t max_cache_size = (size_t)(_table->_size * _max_cache_factor);
_entry_cache->trim(max_cache_size);
void G1StringDedupTable::clean_entry_cache() {
_entry_cache->delete_overflowed();
}
void G1StringDedupTable::print_statistics() {
Log(gc, stringdedup) log;
log.debug(" [Table]");
log.debug(" [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]",
log.debug(" Table");
log.debug(" Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS,
G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
log.debug(" [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]", _table->_size, _min_size, _max_size);
log.debug(" [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]",
log.debug(" Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
log.debug(" Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
_table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
log.debug(" [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]",
log.debug(" Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
_resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
log.debug(" [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x]", _rehash_count, _rehash_threshold, _table->_hash_seed);
log.debug(" [Age Threshold: " UINTX_FORMAT "]", StringDeduplicationAgeThreshold);
log.debug(" Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
log.debug(" Age Threshold: " UINTX_FORMAT, StringDeduplicationAgeThreshold);
}

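A hedged, standalone model of the cache rework above (C++17; names are illustrative): one cached list and one overflow list per worker keeps frees lock-free, alignas(64) stands in for PaddedEnd, and overflowed entries are only deleted later, off the hot path.

#include <cstddef>
#include <forward_list>
#include <vector>

struct Entry { int payload; };

class PerWorkerEntryCache {
  // alignas(64) keeps each worker's slot on its own cache line,
  // modeling the false-sharing avoidance the real code gets from PaddedEnd.
  struct alignas(64) Lists {
    std::forward_list<Entry*> cached, overflowed;
    size_t cached_len = 0;
  };
  std::vector<Lists> _lists;
  size_t _max_list_length;
public:
  PerWorkerEntryCache(size_t workers, size_t max_len)
    : _lists(workers), _max_list_length(max_len) {}
  void free_entry(Entry* e, size_t worker_id) {
    Lists& l = _lists[worker_id];
    if (l.cached_len < _max_list_length) {
      l.cached.push_front(e);        // cache for reuse by alloc()
      l.cached_len++;
    } else {
      l.overflowed.push_front(e);    // park for deferred deletion
    }
  }
  void delete_overflowed() {         // runs off the hot path
    for (Lists& l : _lists) {
      for (Entry* e : l.overflowed) delete e;
      l.overflowed.clear();
    }
  }
};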
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -229,8 +229,8 @@ public:
// and deletes the previously active table.
static void finish_rehash(G1StringDedupTable* rehashed_table);
// If the table entry cache has grown too large, trim it down according to policy
static void trim_entry_cache();
// If the table entry cache has grown too large, delete overflowed entries.
static void clean_entry_cache();
static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);

@ -103,6 +103,7 @@ void G1StringDedupThread::run_service() {
SuspendibleThreadSetJoiner sts_join;
stat.mark_exec();
print_start(stat);
// Process the queue
for (;;) {
@ -121,30 +122,30 @@ void G1StringDedupThread::run_service() {
}
}
G1StringDedupTable::trim_entry_cache();
stat.mark_done();
// Print statistics
total_stat.add(stat);
print(stat, total_stat);
print_end(stat, total_stat);
}
}
G1StringDedupTable::clean_entry_cache();
}
}
void G1StringDedupThread::stop_service() {
G1StringDedupQueue::cancel_wait();
}
void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
if (log_is_enabled(Info, gc, stringdedup)) {
G1StringDedupStat::print_summary(last_stat, total_stat);
if (log_is_enabled(Debug, gc, stringdedup)) {
G1StringDedupStat::print_statistics(last_stat, false);
G1StringDedupStat::print_statistics(total_stat, true);
G1StringDedupTable::print_statistics();
G1StringDedupQueue::print_statistics();
}
void G1StringDedupThread::print_start(const G1StringDedupStat& last_stat) {
G1StringDedupStat::print_start(last_stat);
}
void G1StringDedupThread::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
G1StringDedupStat::print_end(last_stat, total_stat);
if (log_is_enabled(Debug, gc, stringdedup)) {
G1StringDedupStat::print_statistics(last_stat, false);
G1StringDedupStat::print_statistics(total_stat, true);
G1StringDedupTable::print_statistics();
G1StringDedupQueue::print_statistics();
}
}

@ -43,7 +43,8 @@ private:
G1StringDedupThread();
~G1StringDedupThread();
void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
void print_start(const G1StringDedupStat& last_stat);
void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
void run_service();
void stop_service();

@ -71,38 +71,51 @@ void G1YoungRemSetSamplingThread::stop_service() {
_monitor.notify();
}
class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
SuspendibleThreadSetJoiner* _sts;
size_t _regions_visited;
size_t _sampled_rs_lengths;
public:
G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
virtual bool doHeapRegion(HeapRegion* r) {
size_t rs_length = r->rem_set()->occupied();
_sampled_rs_lengths += rs_length;
// Update the collection set policy information for this region
G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
_regions_visited++;
if (_regions_visited == 10) {
if (_sts->should_yield()) {
_sts->yield();
// A gc may have occurred and our sampling data is stale and further
// traversal of the collection set is unsafe
return true;
}
_regions_visited = 0;
}
return false;
}
size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
};
void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
SuspendibleThreadSetJoiner sts;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* g1p = g1h->g1_policy();
G1CollectionSet* g1cs = g1h->collection_set();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
HeapRegion* hr = g1cs->inc_head();
size_t sampled_rs_lengths = 0;
G1YoungRemSetSamplingClosure cl(&sts);
while (hr != NULL) {
size_t rs_length = hr->rem_set()->occupied();
sampled_rs_lengths += rs_length;
G1CollectionSet* g1cs = g1h->collection_set();
g1cs->iterate(&cl);
// Update the collection set policy information for this region
g1cs->update_young_region_prediction(hr, rs_length);
++regions_visited;
// we try to yield every time we visit 10 regions
if (regions_visited == 10) {
if (sts.should_yield()) {
sts.yield();
// A gc may have occurred and our sampling data is stale and further
// traversal of the collection set is unsafe
return;
}
regions_visited = 0;
}
assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
hr = hr->next_in_collection_set();
if (cl.complete()) {
g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
}
g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
}
}

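A minimal sketch of the yielding-closure shape above, with the suspendible-thread-set protocol reduced to two assumed stubs: the closure polls for a safepoint every 10 regions and aborts the traversal if it had to yield, since the sample is then stale.

#include <cstddef>

struct Region { size_t rs_length; };

// Assumed stubs for the suspendible-thread-set protocol: a GC safepoint
// can only start while this thread has yielded.
static bool safepoint_requested() { return false; }  // assumed: polled flag
static void yield_to_safepoint() {}                  // assumed: blocks here

struct SamplingClosure {
  size_t visited = 0;
  size_t sampled_rs_lengths = 0;
  bool do_region(Region* r) {
    sampled_rs_lengths += r->rs_length;
    if (++visited % 10 == 0 && safepoint_requested()) {
      yield_to_safepoint();
      return true;   // a GC may have occurred; traversal is now unsafe
    }
    return false;
  }
};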
@ -284,7 +284,6 @@ HeapRegion::HeapRegion(uint hrm_index,
_hrm_index(hrm_index),
_allocation_context(AllocationContext::system()),
_humongous_start_region(NULL),
_next_in_special_set(NULL),
_evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_next(NULL), _prev(NULL),

@ -261,12 +261,6 @@ class HeapRegion: public G1ContiguousSpace {
// True iff an attempt to evacuate an object in the region failed.
bool _evacuation_failed;
// A heap region may be a member of one of a number of special subsets, each
// represented as linked lists through the field below. Currently, there
// is only one set:
// The collection set.
HeapRegion* _next_in_special_set;
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
@ -476,9 +470,6 @@ class HeapRegion: public G1ContiguousSpace {
inline bool in_collection_set() const;
inline HeapRegion* next_in_collection_set() const;
inline void set_next_in_collection_set(HeapRegion* r);
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;
}
@ -744,7 +735,7 @@ class HeapRegion: public G1ContiguousSpace {
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
friend class HeapRegionManager;
friend class G1CollectedHeap;
friend class G1CollectionSet;
bool _complete;
void incomplete() { _complete = false; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -230,18 +230,4 @@ inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
inline HeapRegion* HeapRegion::next_in_collection_set() const {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->in_collection_set(),
"Malformed CS.");
return _next_in_special_set;
}
void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
_next_in_special_set = r;
}
#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP

@ -386,13 +386,21 @@ GCTaskThread* GCTaskManager::install_worker(uint t) {
void GCTaskManager::add_workers(bool initializing) {
os::ThreadType worker_type = os::pgc_thread;
uint previous_created_workers = _created_workers;
_created_workers = WorkerManager::add_workers(this,
_active_workers,
(uint) _workers,
_workers,
_created_workers,
worker_type,
initializing);
_active_workers = MIN2(_created_workers, _active_workers);
WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
}
const char* GCTaskManager::group_name() {
return "ParGC Thread";
}
void GCTaskManager::initialize() {

@ -556,6 +556,8 @@ protected:
GCTaskThread* install_worker(uint worker_id);
// Add GC workers as needed.
void add_workers(bool initializing);
// Base name (without worker id #) of threads.
const char* group_name();
};
//

@ -45,7 +45,7 @@ GCTaskThread::GCTaskThread(GCTaskManager* manager,
_time_stamp_index(0)
{
set_id(which);
set_name("ParGC Thread#%d", which);
set_name("%s#%d", manager->group_name(), which);
}
GCTaskThread::~GCTaskThread() {

@ -55,6 +55,7 @@ private:
return new GCTaskThread(manager, which, processor_id);
}
public:
static void destroy(GCTaskThread* manager) {
if (manager != NULL) {
delete manager;

@ -159,6 +159,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
inline static void post_allocation_setup_array(KlassHandle klass,
HeapWord* obj, int length);
inline static void post_allocation_setup_class(KlassHandle klass, HeapWord* obj, int size);
// Clears an allocated object.
inline static void init_obj(HeapWord* obj, size_t size);
@ -300,6 +302,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);
inline static oop class_allocate(KlassHandle klass, int size, TRAPS);
inline static void post_allocation_install_obj_klass(KlassHandle klass,
oop obj);

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
@ -96,6 +97,22 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
post_allocation_notify(klass, (oop)obj, size);
}
void CollectedHeap::post_allocation_setup_class(KlassHandle klass,
HeapWord* obj,
int size) {
// Set oop_size field before setting the _klass field
// in post_allocation_setup_common() because the klass field
// indicates that the object is parsable by concurrent GC.
oop new_cls = (oop)obj;
assert(size > 0, "oop_size must be positive.");
java_lang_Class::set_oop_size(new_cls, size);
post_allocation_setup_common(klass, obj);
assert(Universe::is_bootstrapping() ||
!new_cls->is_array(), "must not be an array");
// notify jvmti and dtrace
post_allocation_notify(klass, new_cls, size);
}
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj,
int length) {
@ -207,6 +224,16 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
return (oop)obj;
}
oop CollectedHeap::class_allocate(KlassHandle klass, int size, TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
post_allocation_setup_class(klass, obj, size); // set oop_size
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
oop CollectedHeap::array_allocate(KlassHandle klass,
int size,
int length,

@ -48,10 +48,10 @@ void PreservedMarks::restore_and_increment(volatile size_t* const total_size_add
#ifndef PRODUCT
void PreservedMarks::assert_empty() {
assert(_stack.is_empty(), "stack expected to be empty, size = "SIZE_FORMAT,
assert(_stack.is_empty(), "stack expected to be empty, size = " SIZE_FORMAT,
_stack.size());
assert(_stack.cache_size() == 0,
"stack expected to have no cached segments, cache size = "SIZE_FORMAT,
"stack expected to have no cached segments, cache size = " SIZE_FORMAT,
_stack.cache_size());
}
#endif // ndef PRODUCT

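Context for the whitespace-only fix above (a sketch, not part of the patch): C++11 parses an identifier glued directly onto a string literal as a user-defined-literal suffix, so format-string macros need a separating space.

#include <cstdio>

#define SIZE_FORMAT "%zu"   // illustrative stand-in for the HotSpot macro

int main() {
  size_t n = 42;
  // OK: adjacent string literals concatenate at translation time.
  printf("size = " SIZE_FORMAT "\n", n);
  // Without the space, "size = "SIZE_FORMAT is lexed as one token: a
  // string literal with user-defined suffix SIZE_FORMAT. The macro is
  // never expanded and compilation fails.
  return 0;
}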
@ -47,18 +47,18 @@ class WorkerManager : public AllStatic {
// threads and a failure would not be optimal but should not be fatal.
template <class WorkerType>
static uint add_workers (WorkerType* holder,
uint active_workers,
uint total_workers,
uint created_workers,
os::ThreadType worker_type,
bool initializing) {
uint active_workers,
uint total_workers,
uint created_workers,
os::ThreadType worker_type,
bool initializing) {
uint start = created_workers;
uint end = MIN2(active_workers, total_workers);
for (uint worker_id = start; worker_id < end; worker_id += 1) {
WorkerThread* new_worker = holder->install_worker(worker_id);
assert(new_worker != NULL, "Failed to allocate GangWorker");
if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
if(initializing) {
if (initializing) {
vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
"Cannot create worker GC thread. Out of system resources.");
}
@ -67,11 +67,21 @@ class WorkerManager : public AllStatic {
os::start_thread(new_worker);
}
log_trace(gc, task)("AdaptiveSizePolicy::add_workers() : "
"active_workers: %u created_workers: %u",
active_workers, created_workers);
return created_workers;
}
// Log (at trace level) a change in the number of created workers.
template <class WorkerType>
static void log_worker_creation(WorkerType* holder,
uint previous_created_workers,
uint active_workers,
uint created_workers,
bool initializing) {
if (previous_created_workers < created_workers) {
const char* initializing_msg = initializing ? "Adding initial" : "Creating additional";
log_trace(gc, task)("%s %s(s) previously created workers %u active workers %u total created workers %u",
initializing_msg, holder->group_name(), previous_created_workers, active_workers, created_workers);
}
}
};
#endif // SHARE_VM_GC_SHARED_WORKERMANAGER_HPP

@ -66,6 +66,7 @@ void AbstractWorkGang::add_workers(uint active_workers, bool initializing) {
} else {
worker_type = os::pgc_thread;
}
uint previous_created_workers = _created_workers;
_created_workers = WorkerManager::add_workers(this,
active_workers,
@ -74,6 +75,8 @@ void AbstractWorkGang::add_workers(uint active_workers, bool initializing) {
worker_type,
initializing);
_active_workers = MIN2(_created_workers, _active_workers);
WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
}
AbstractGangWorker* AbstractWorkGang::worker(uint i) const {

@ -176,6 +176,9 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
// Return the Ith worker.
AbstractGangWorker* worker(uint i) const;
// Base name (without worker id #) of threads.
const char* group_name() { return name(); }
void threads_do(ThreadClosure* tc) const;
// Create a GC worker and install it into the work gang.

@ -576,27 +576,27 @@ void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code byt
// compute auxiliary field attributes
TosState state = as_TosState(info.field_type());
// We need to delay resolving put instructions on final fields
// until we actually invoke one. This is required so we throw
// exceptions at the correct place. If we do not resolve completely
// in the current pass, leaving the put_code set to zero will
// cause the next put instruction to reresolve.
Bytecodes::Code put_code = (Bytecodes::Code)0;
// We also need to delay resolving getstatic instructions until the
// class is initialized. This is required so that access to the static
// Put instructions on final fields are not resolved. This is required so we throw
// exceptions at the correct place (when the instruction is actually invoked).
// If we do not resolve an instruction in the current pass, leaving the put_code
// set to zero will cause the next put instruction to the same field to reresolve.
//
// Also, we need to delay resolving getstatic and putstatic instructions until the
// class is initialized. This is required so that access to the static
// field will call the initialization function every time until the class
// is completely initialized, as described in section 2.17.5 of the JVM Specification.
InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
!klass->is_initialized());
Bytecodes::Code get_code = (Bytecodes::Code)0;
Bytecodes::Code put_code = (Bytecodes::Code)0;
if (is_put && !info.access_flags().is_final() && !uninitialized_static) {
put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
}
Bytecodes::Code get_code = (Bytecodes::Code)0;
if (!uninitialized_static) {
get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
if (is_put || !info.access_flags().is_final()) {
put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
}
}
cp_cache_entry->set_field(

@ -970,7 +970,7 @@ void LinkResolver::resolve_field(fieldDescriptor& fd,
if (is_initialized_static_final_update || is_initialized_instance_final_update) {
ss.print("Update to %s final field %s.%s attempted from a different method (%s) than the initializer method %s ",
is_static ? "static" : "non-static", resolved_klass()->external_name(), fd.name()->as_C_string(),
current_klass()->external_name(),
m()->name()->as_C_string(),
is_static ? "<clinit>" : "<init>");
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string());
}

@ -419,21 +419,20 @@ void Rewriter::scan_method(Method* method, bool reverse, bool* invokespecial_err
InstanceKlass* klass = method->method_holder();
u2 bc_index = Bytes::get_Java_u2(bcp + prefix_length + 1);
constantPoolHandle cp(method->constants());
Symbol* field_name = cp->name_ref_at(bc_index);
Symbol* field_sig = cp->signature_ref_at(bc_index);
Symbol* ref_class_name = cp->klass_name_at(cp->klass_ref_index_at(bc_index));
if (klass->name() == ref_class_name) {
Symbol* field_name = cp->name_ref_at(bc_index);
Symbol* field_sig = cp->signature_ref_at(bc_index);
fieldDescriptor fd;
klass->find_field(field_name, field_sig, &fd);
if (fd.access_flags().is_final()) {
if (fd.access_flags().is_static()) {
assert(c == Bytecodes::_putstatic, "must be putstatic");
if (!method->is_static_initializer()) {
fd.set_has_initialized_final_update(true);
}
} else {
assert(c == Bytecodes::_putfield, "must be putfield");
if (!method->is_object_initializer()) {
fd.set_has_initialized_final_update(true);
}

@ -415,17 +415,8 @@ void LogConfiguration::describe_available(outputStream* out){
void LogConfiguration::describe_current_configuration(outputStream* out){
out->print_cr("Log output configuration:");
for (size_t i = 0; i < _n_outputs; i++) {
out->print("#" SIZE_FORMAT ": %s ", i, _outputs[i]->name());
out->print_raw(_outputs[i]->config_string());
out->print(" ");
char delimiter[2] = {0};
for (size_t d = 0; d < LogDecorators::Count; d++) {
LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
if (_outputs[i]->decorators().is_decorator(decorator)) {
out->print("%s%s", delimiter, LogDecorators::name(decorator));
*delimiter = ',';
}
}
out->print("#" SIZE_FORMAT ": ", i);
_outputs[i]->describe(out);
out->cr();
}
}

@ -428,3 +428,13 @@ char* LogFileOutput::make_file_name(const char* file_name,
result[result_len] = '\0';
return result;
}
void LogFileOutput::describe(outputStream *out) {
LogOutput::describe(out);
out->print(" ");
out->print("filecount=%u,filesize=" SIZE_FORMAT "%s", _file_count,
byte_size_in_proper_unit(_rotate_size),
proper_unit_for_byte_size(_rotate_size));
}

@ -85,6 +85,7 @@ class LogFileOutput : public LogFileStreamOutput {
virtual int write(const LogDecorations& decorations, const char* msg);
virtual int write(LogMessageBuffer::Iterator msg_iterator);
virtual void force_rotate();
virtual void describe(outputStream *out);
virtual const char* name() const {
return _name;

@ -83,3 +83,18 @@ void LogOutput::add_to_config_string(const LogTagSet* ts, LogLevelType level) {
break;
}
}
void LogOutput::describe(outputStream *out) {
out->print("%s ", name());
out->print_raw(config_string());
out->print(" ");
char delimiter[2] = {0};
for (size_t d = 0; d < LogDecorators::Count; d++) {
LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
if (decorators().is_decorator(decorator)) {
out->print("%s%s", delimiter, LogDecorators::name(decorator));
*delimiter = ',';
}
}
}

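The two-byte delimiter above is a compact idiom; a standalone sketch: the array starts out as an empty string, so the first item prints without a separator, and the first byte becomes a comma afterwards.

#include <cstdio>

int main() {
  const char* names[] = { "uptime", "level", "tags" };
  char delimiter[2] = {0};          // "" before the first item
  for (const char* n : names) {
    printf("%s%s", delimiter, n);   // prints "uptime,level,tags"
    *delimiter = ',';               // ",\0" for every later item
  }
  printf("\n");
  return 0;
}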
@ -83,6 +83,8 @@ class LogOutput : public CHeapObj<mtLogging> {
// Do nothing by default.
}
virtual void describe(outputStream *out);
virtual const char* name() const = 0;
virtual bool initialize(const char* options, outputStream* errstream) = 0;
virtual int write(const LogDecorations& decorations, const char* msg) = 0;

@ -74,6 +74,7 @@ DEBUG_ONLY(size_t Test_log_prefix_prefixer(char* buf, size_t len);)
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \

@ -3106,10 +3106,6 @@ void Metaspace::ergo_initialize() {
assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
if (MetaspaceSize < 256*K) {
vm_exit_during_initialization("Too small initial Metaspace size");
}
MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,13 +50,12 @@ instanceOop InstanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
// Query before forming handle.
int size = instance_size(k);
KlassHandle h_k(THREAD, this);
instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
assert(size > 0, "total object size must be positive: %d", size);
// Since mirrors can be variable sized because of the static fields, store
// the size in the mirror itself.
java_lang_Class::set_oop_size(i, size);
return i;
return (instanceOop)CollectedHeap::class_allocate(h_k, size, CHECK_NULL);
}
int InstanceMirrorKlass::oop_size(oop obj) const {

@ -258,8 +258,8 @@ int oopDesc::size_given_klass(Klass* klass) {
}
}
assert(s % MinObjAlignment == 0, "alignment check");
assert(s > 0, "Bad size calculated");
assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
assert(s > 0, "Oop size must be greater than zero, not %d", s);
return s;
}

@ -6509,6 +6509,59 @@ class C2 extends C1 implements I2 {
<errors>
</errors>
</function>
<function id="GetNamedModule" num="40" since="9">
<synopsis>Get Named Module</synopsis>
<description>
Return the <code>java.lang.reflect.Module</code> object for a named
module defined to a class loader that contains a given package.
The module is returned via <code>module_ptr</code>.
<p/>
If a named module is defined to the class loader and it
contains the package, then that named module is returned;
otherwise <code>NULL</code> is returned.
<p/>
</description>
<origin>new</origin>
<capabilities>
</capabilities>
<parameters>
<param id="class_loader">
<ptrtype>
<jobject/>
<nullok>the bootstrap loader is assumed</nullok>
</ptrtype>
<description>
A class loader.
If the <code>class_loader</code> is not <code>NULL</code>
and is not a subclass of <code>java.lang.ClassLoader</code>,
this function returns
<errorlink id="JVMTI_ERROR_ILLEGAL_ARGUMENT"></errorlink>.
</description>
</param>
<param id="package_name">
<inbuf><char/></inbuf>
<description>
The name of the package, encoded as a
<internallink id="mUTF">modified UTF-8</internallink> string.
The package name is in internal form (JVMS 4.2.1);
identifiers are separated by forward slashes rather than periods.
</description>
</param>
<param id="module_ptr">
<outptr><jobject/></outptr>
<description>
On return, points to a <code>java.lang.reflect.Module</code> object
or points to <code>NULL</code>.
</description>
</param>
</parameters>
<errors>
<error id="JVMTI_ERROR_ILLEGAL_ARGUMENT">
If class loader is not <code>NULL</code> and is not a class loader object.
</error>
</errors>
</function>
</category>
<category id="class" label="Class">
@ -12462,6 +12515,14 @@ myInit() {
<code>new_class_data</code> has been set, it becomes the
<code>class_data</code> for the next agent.
<p/>
When handling a class load in the live phase, the
<functionlink id="GetNamedModule"></functionlink>
function can be used to map a class loader and a package name to a module.
When a class is being redefined or retransformed,
<code>class_being_redefined</code> is non-<code>NULL</code>, so
the JNI <code>GetModule</code> function can also be used
to obtain the Module.
<p/>
The order that this event is sent to each environment differs
from other events.
This event is sent to environments in the following order:
@ -14427,20 +14488,15 @@ typedef void (JNICALL *jvmtiEventVMInit)
<change date="19 June 2013" version="1.2.3">
Added support for statically linked agents.
</change>
<change date="20 January 2016" version="9.0.0">
<change date="5 July 2016" version="9.0.0">
Support for modules:
- The major version is now 9
- The ClassFileLoadHook events are not sent during the primordial phase anymore.
- Add new function GetAllModules
</change>
<change date="17 February 2016" version="9.0.0">
Support for modules:
- Add new capability can_generate_early_vmstart
- Allow CompiledMethodLoad events at start phase
</change>
<change date="14 April 2016" version="9.0.0">
Support for modules:
- Add new capability can_generate_early_class_hook_events
- Add new function GetNamedModule
</change>
</changehistory>

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecodeStream.hpp"
@ -201,6 +202,28 @@ JvmtiEnv::GetAllModules(jint* module_count_ptr, jobject** modules_ptr) {
} /* end GetAllModules */
// class_loader - NULL is a valid value, must be pre-checked
// package_name - pre-checked for NULL
// module_ptr - pre-checked for NULL
jvmtiError
JvmtiEnv::GetNamedModule(jobject class_loader, const char* package_name, jobject* module_ptr) {
JavaThread* THREAD = JavaThread::current(); // pass to macros
ResourceMark rm(THREAD);
Handle h_loader (THREAD, JNIHandles::resolve(class_loader));
// Check that loader is a subclass of java.lang.ClassLoader.
if (h_loader.not_null() && !java_lang_ClassLoader::is_subclass(h_loader->klass())) {
return JVMTI_ERROR_ILLEGAL_ARGUMENT;
}
jobject module = Modules::get_named_module(h_loader, package_name, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return JVMTI_ERROR_INTERNAL; // unexpected exception
}
*module_ptr = module;
return JVMTI_ERROR_NONE;
} /* end GetNamedModule */
//
// Class functions
//

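A hedged usage sketch of the new entry point from an agent's point of view (assumes a JDK 9 jvmti.h; error handling trimmed):

#include <jvmti.h>

// Resolve the module that contains package "java/lang" for the bootstrap
// loader (class_loader == NULL selects the boot loader, per the <nullok>
// clause above). Call in the live phase on an attached thread, since the
// returned jobject is a JNI local reference.
static jobject module_for_boot_package(jvmtiEnv* jvmti) {
  jobject module = NULL;
  jvmtiError err = jvmti->GetNamedModule(NULL,          // class_loader
                                         "java/lang",   // package_name
                                         &module);
  return (err == JVMTI_ERROR_NONE) ? module : NULL;     // NULL: no such module
}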
@ -584,27 +584,26 @@ static bool verify_special_jvm_flags() {
// Parses a size specification string.
bool Arguments::atojulong(const char *s, julong* result) {
julong n = 0;
int args_read = 0;
bool is_hex = false;
// Skip leading 0[xX] for hexadecimal
if (*s =='0' && (*(s+1) == 'x' || *(s+1) == 'X')) {
s += 2;
is_hex = true;
args_read = sscanf(s, JULONG_FORMAT_X, &n);
} else {
args_read = sscanf(s, JULONG_FORMAT, &n);
}
if (args_read != 1) {
// First char must be a digit. Don't allow negative numbers or leading spaces.
if (!isdigit(*s)) {
return false;
}
while (*s != '\0' && (isdigit(*s) || (is_hex && isxdigit(*s)))) {
s++;
}
// 4705540: illegal if more characters are found after the first non-digit
if (strlen(s) > 1) {
bool is_hex = (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'));
char* remainder;
errno = 0;
n = strtoull(s, &remainder, (is_hex ? 16 : 10));
if (errno != 0) {
return false;
}
switch (*s) {
// Fail if no number was read at all or if the remainder contains more than a single non-digit character.
if (remainder == s || strlen(remainder) > 1) {
return false;
}
switch (*remainder) {
case 'T': case 't':
*result = n * G * K;
// Check for overflow.

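A standalone model of the rewritten parser above, with the same strtoull-based validation; overflow checks on the unit multiplication are elided here, which the real code does perform.

#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Reject non-digits up front (no leading spaces, no '-'), let strtoull
// consume the number, and allow at most one trailing unit character.
static bool parse_size(const char* s, uint64_t* result) {
  if (!isdigit((unsigned char)*s)) return false;        // no "-1", no " 1"
  bool is_hex = (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'));
  char* rest;
  errno = 0;
  uint64_t n = strtoull(s, &rest, is_hex ? 16 : 10);
  if (errno != 0 || rest == s || strlen(rest) > 1) return false;
  switch (*rest) {
    case '\0':          *result = n;       return true;
    case 'k': case 'K': *result = n << 10; return true;
    case 'm': case 'M': *result = n << 20; return true;
    case 'g': case 'G': *result = n << 30; return true;
    default:            return false;
  }
}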
@ -131,8 +131,6 @@ static int Knob_MoveNotifyee = 2; // notify() - disposition of noti
static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
static volatile int InitDone = 0;
#define TrySpin TrySpin_VaryDuration
// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
@ -1848,13 +1846,8 @@ void ObjectMonitor::notifyAll(TRAPS) {
// hysteresis control to damp the transition rate between spinning and
// not spinning.
intptr_t ObjectMonitor::SpinCallbackArgument = 0;
int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL;
// Spinning: Fixed frequency (100%), vary duration
int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
int ObjectMonitor::TrySpin(Thread * Self) {
// Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
int ctr = Knob_FixedSpin;
if (ctr != 0) {
@ -1948,11 +1941,6 @@ int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
goto Abort; // abrupt spin egress
}
if (Knob_UsePause & 1) SpinPause();
int (*scb)(intptr_t,int) = SpinCallbackFunction;
if (hits > 50 && scb != NULL) {
int abend = (*scb)(SpinCallbackArgument, 0);
}
}
if (Knob_UsePause & 2) SpinPause();

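For orientation, a minimal sketch of the bounded spin shape that TrySpin keeps after this cleanup, using std::atomic and a compiler pause intrinsic as stand-ins (assumptions, not the HotSpot code):

#include <atomic>

// Stand-in for the CPU pause hint (the x86 PAUSE instruction).
static inline void spin_pause() {
#if defined(__x86_64__) || defined(__i386__)
  __builtin_ia32_pause();
#endif
}

// Spin a bounded number of times before giving up and blocking, with a
// pause hint in the body to be polite to the sibling hyperthread.
static bool try_spin(std::atomic<int>& lock_word, int max_spins) {
  for (int ctr = 0; ctr < max_spins; ctr++) {
    int expected = 0;
    if (lock_word.compare_exchange_weak(expected, 1)) return true; // got it
    spin_pause();
  }
  return false;   // caller should enqueue/park instead
}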
@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -161,9 +161,6 @@ class ObjectMonitor {
Thread * volatile _Responsible;
volatile int _Spinner; // for exit->spinner handoff optimization
volatile int _SpinFreq; // Spin 1-out-of-N attempts: success rate
volatile int _SpinClock;
volatile intptr_t _SpinState; // MCS/CLH list of spinners
volatile int _SpinDuration;
volatile jint _count; // reference count to prevent reclamation/deflation
@ -238,10 +235,6 @@ class ObjectMonitor {
static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
static int FreeNext_offset_in_bytes() { return offset_of(ObjectMonitor, FreeNext); }
static int WaitSet_offset_in_bytes() { return offset_of(ObjectMonitor, _WaitSet); }
static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
static int Spinner_offset_in_bytes() { return offset_of(ObjectMonitor, _Spinner); }
// ObjectMonitor references can be ORed with markOopDesc::monitor_value
// as part of the ObjectMonitor tagging mechanism. When we combine an
@ -257,11 +250,6 @@ class ObjectMonitor {
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
// Eventually we'll make provisions for multiple callbacks, but
// now one will suffice.
static int (*SpinCallbackFunction)(intptr_t, int);
static intptr_t SpinCallbackArgument;
markOop header() const;
void set_header(markOop hdr);
@ -312,8 +300,6 @@ class ObjectMonitor {
_cxq = NULL;
_WaitSet = NULL;
_recursions = 0;
_SpinFreq = 0;
_SpinClock = 0;
}
public:
@ -353,9 +339,7 @@ class ObjectMonitor {
void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
int TryLock(Thread * Self);
int NotRunnable(Thread * Self, Thread * Owner);
int TrySpin_Fixed(Thread * Self);
int TrySpin_VaryFrequency(Thread * Self);
int TrySpin_VaryDuration(Thread * Self);
int TrySpin(Thread * Self);
void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
bool ExitSuspendEquivalent(JavaThread * Self);
void post_monitor_wait_event(EventJavaMonitorWait * event,

@ -388,16 +388,6 @@ class SharedRuntime: AllStatic {
static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
int total_args_passed);
// Compute the new number of arguments in the signature if 32 bit ints
// must be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
// is true.
static int convert_ints_to_longints_argcnt(int in_args_count, BasicType* in_sig_bt);
// Adapt a method's signature if it contains 32 bit integers that must
// be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
// is true.
static void convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
BasicType*& in_sig_bt, VMRegPair*& in_regs);
static size_t trampoline_size();
static void generate_trampoline(MacroAssembler *masm, address destination);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -144,8 +144,6 @@ class ObjectSynchronizer : AllStatic {
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
static void RegisterSpinCallback(int(*)(intptr_t, int), intptr_t);
private:
enum { _BLOCKSIZE = 128 };
// global list of blocks of monitors

@ -205,16 +205,39 @@ void VMError::print_stack_trace(outputStream* st, JavaThread* jt,
static void print_oom_reasons(outputStream* st) {
st->print_cr("# Possible reasons:");
st->print_cr("# The system is out of physical RAM or swap space");
st->print_cr("# In 32 bit mode, the process size limit was hit");
if (UseCompressedOops) {
st->print_cr("# The process is running with CompressedOops enabled, and the Java Heap may be blocking the growth of the native heap");
}
if (LogBytesPerWord == 2) {
st->print_cr("# In 32 bit mode, the process size limit was hit");
}
st->print_cr("# Possible solutions:");
st->print_cr("# Reduce memory load on the system");
st->print_cr("# Increase physical memory or swap space");
st->print_cr("# Check if swap backing store is full");
st->print_cr("# Use 64 bit Java on a 64 bit OS");
if (LogBytesPerWord == 2) {
st->print_cr("# Use 64 bit Java on a 64 bit OS");
}
st->print_cr("# Decrease Java heap size (-Xmx/-Xms)");
st->print_cr("# Decrease number of Java threads");
st->print_cr("# Decrease Java thread stack sizes (-Xss)");
st->print_cr("# Set larger code cache with -XX:ReservedCodeCacheSize=");
if (UseCompressedOops) {
switch (Universe::narrow_oop_mode()) {
case Universe::UnscaledNarrowOop:
st->print_cr("# JVM is running with Unscaled Compressed Oops mode in which the Java heap is");
st->print_cr("# placed in the first 4GB address space. The Java Heap base address is the");
st->print_cr("# maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
st->print_cr("# to set the Java Heap base and to place the Java Heap above 4GB virtual address.");
break;
case Universe::ZeroBasedNarrowOop:
st->print_cr("# JVM is running with Zero Based Compressed Oops mode in which the Java heap is");
st->print_cr("# placed in the first 32GB address space. The Java Heap base address is the");
st->print_cr("# maximum limit for the native heap growth. Please use -XX:HeapBaseMinAddress");
st->print_cr("# to set the Java Heap base and to place the Java Heap above 32GB virtual address.");
break;
}
}
st->print_cr("# This output file may be truncated or incomplete.");
}

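A small standalone model of the mode selection the new messages above describe; the thresholds follow the message text, while the real choice also depends on alignment and platform details.

#include <cstdint>
#include <cstdio>

// Rough model: the narrow-oop mode follows from where the heap ends.
static const char* narrow_oop_mode(uint64_t heap_base, uint64_t heap_size) {
  const uint64_t G = 1024ULL * 1024 * 1024;
  uint64_t heap_end = heap_base + heap_size;
  if (heap_end <= 4 * G)  return "Unscaled";    // 32-bit oop used as-is
  if (heap_end <= 32 * G) return "ZeroBased";   // oop = narrow << 3
  return "HeapBased";                           // oop = base + (narrow << 3)
}

int main() {
  // e.g. -XX:HeapBaseMinAddress=4g moves the heap base up, leaving the
  // low 4GB of virtual address space free for native heap growth.
  printf("%s\n", narrow_oop_mode(4ULL << 30, 8ULL << 30));  // ZeroBased
  return 0;
}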
@ -130,8 +130,8 @@ jre = \
# Tests that require the full JRE
#
needs_jre = \
compiler/c2/6852078/Test6852078.java \
compiler/c2/7047069/Test7047069.java \
compiler/c2/Test6852078.java \
compiler/c2/Test7047069.java \
runtime/6294277/SourceDebugExtension.java \
runtime/ClassFile/JsrRewriting.java \
runtime/ClassFile/OomWhileParsingRepeatedJsr.java \
@ -277,16 +277,16 @@ hotspot_fast_compiler_1 = \
compiler/arraycopy/ \
compiler/c1/ \
compiler/c2/ \
-compiler/c2/5091921/Test6850611.java \
-compiler/c2/5091921/Test6890943.java \
-compiler/c2/5091921/Test6905845.java \
-compiler/c2/6340864 \
-compiler/c2/6589834 \
-compiler/c2/6603011 \
-compiler/c2/6912517 \
-compiler/c2/6792161 \
-compiler/c2/7070134 \
-compiler/c2/8004867
-compiler/c2/Test6850611.java \
-compiler/c2/cr6890943/Test6890943.java \
-compiler/c2/Test6905845.java \
-compiler/c2/cr6340864 \
-compiler/c2/cr6589834 \
-compiler/c2/cr8004867
-compiler/c2/stemmer \
-compiler/c2/Test6792161.java \
-compiler/c2/Test6603011.java \
-compiler/c2/Test6912517.java \
hotspot_fast_compiler_2 = \
compiler/classUnloading/ \
@ -303,7 +303,7 @@ hotspot_fast_compiler_2 = \
compiler/integerArithmetic/ \
compiler/interpreter/ \
compiler/jvmci/ \
-compiler/codegen/7184394 \
-compiler/codegen/aes \
-compiler/codecache/stress \
-compiler/gcbarriers/PreserveFPRegistersTest.java
@ -320,13 +320,13 @@ hotspot_fast_compiler_3 = \
compiler/types/ \
compiler/uncommontrap/ \
compiler/unsafe/ \
-compiler/intrinsics/adler32 \
-compiler/intrinsics/bmi \
-compiler/intrinsics/mathexact \
-compiler/intrinsics/multiplytolen \
-compiler/intrinsics/sha \
-compiler/loopopts/7052494 \
-compiler/runtime/6826736
-compiler/intrinsics/bigInteger/TestMultiplyToLen.java \
-compiler/intrinsics/zip/TestAdler32.java \
-compiler/loopopts/Test7052494.java \
-compiler/runtime/Test6826736.java
hotspot_fast_compiler_closed = \
sanity/ExecuteInternalVMTests.java
@ -395,6 +395,17 @@ hotspot_jprt = \
:hotspot_fast_gc_gcold \
:hotspot_fast_runtime \
:hotspot_fast_serviceability
hotspot_runtime_tier2 = \
runtime/ \
serviceability/ \
-:hotspot_fast_runtime \
-:hotspot_fast_serviceability \
-:hotspot_runtime_tier2_platform_agnostic
hotspot_runtime_tier2_platform_agnostic = \
runtime/SelectionResolution \
-:hotspot_fast_runtime
# All tests that depend on the nashorn extension.
#

@ -21,7 +21,10 @@
* questions.
*/
import jdk.test.lib.cli.*;
package compiler.arguments;
import jdk.test.lib.cli.CPUSpecificCommandLineOptionTest;
import jdk.test.lib.cli.CommandLineOptionTest;
/**
* Base class for all X86 bit manipulation related command line options.

@ -21,8 +21,10 @@
* questions.
*/
import jdk.test.lib.*;
import jdk.test.lib.cli.*;
package compiler.arguments;
import jdk.test.lib.ExitCode;
import jdk.test.lib.cli.CommandLineOptionTest;
/**
* Test on bit manipulation related command line options,

Some files were not shown because too many files have changed in this diff.