commit c9b2bc62c9
Merge
@@ -261,3 +261,7 @@ efe7dbc6088691757404e0c8745f894e3ca9c022 jdk9-b09
4a09f5d30be844ac6f714bdb0f63d8c3c08b9a98 jdk9-b16
410bccbded9e9cce80f1e13ad221e37ae97a3986 jdk9-b17
c5495e25c7258ab5f96a1ae14610887d76d2be63 jdk9-b18
2dcf544eb7ed5ac6a3f7813a32e33acea7442405 jdk9-b19
89731ae72a761afdf4262e8b9513f302f6563f89 jdk9-b20
28dd0c7beb3cad9cf95f17b4b5ad87eb447a4084 jdk9-b21
9678e0db8ff6ed845d4c2ee4a3baf7f386a777e5 jdk9-b22
@@ -261,3 +261,7 @@ b114474fb25af4e73cb7219f7c04bd8994da03a5 jdk9-b15
cf22a728521f91a4692b433d39d730a0a1b23155 jdk9-b16
24152ee0ee1abef54a8bab04c099261dba7bcca5 jdk9-b17
65abab59f783fcf02ff8e133431c252f9e5f07d5 jdk9-b18
75a08df650eb3126bab0c4d15241f5886162393c jdk9-b19
ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
9052803f4d01feda28b3d65f2b64dd457d21c7b6 jdk9-b21
8e4bdab4c362aadde2d321f968cd503a2f779e2f jdk9-b22
@@ -512,7 +512,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
)

AC_ARG_WITH(sysroot, [AS_HELP_STRING([--with-sysroot],
[use this directory as sysroot)])],
[use this directory as sysroot])],
[SYSROOT=$with_sysroot]
)

@@ -531,6 +531,75 @@ AC_DEFUN_ONCE([BASIC_SETUP_DEVKIT],
[BASIC_PREPEND_TO_PATH([EXTRA_PATH],$with_extra_path)]
)

if test "x$OPENJDK_BUILD_OS" = "xmacosx"; then
# detect if Xcode is installed by running xcodebuild -version
# if no Xcode installed, xcodebuild exits with 1
# if Xcode is installed, even if xcode-select is misconfigured, then it exits with 0
if /usr/bin/xcodebuild -version >/dev/null 2>&1; then
# We need to use xcodebuild in the toolchain dir provided by the user, this will
# fall back on the stub binary in /usr/bin/xcodebuild
AC_PATH_PROG([XCODEBUILD], [xcodebuild], [/usr/bin/xcodebuild], [$TOOLCHAIN_PATH])
else
# this should result in SYSROOT being empty, unless --with-sysroot is provided
# when only the command line tools are installed there are no SDKs, so headers
# are copied into the system frameworks
XCODEBUILD=
AC_SUBST(XCODEBUILD)
fi

AC_MSG_CHECKING([for sdk name])
AC_ARG_WITH([sdk-name], [AS_HELP_STRING([--with-sdk-name],
[use the platform SDK of the given name. @<:@macosx@:>@])],
[SDKNAME=$with_sdk_name]
)
AC_MSG_RESULT([$SDKNAME])

# if toolchain path is specified then don't rely on system headers, they may not compile
HAVE_SYSTEM_FRAMEWORK_HEADERS=0
test -z "$TOOLCHAIN_PATH" && \
HAVE_SYSTEM_FRAMEWORK_HEADERS=`test ! -f /System/Library/Frameworks/Foundation.framework/Headers/Foundation.h; echo $?`

if test -z "$SYSROOT"; then
if test -n "$XCODEBUILD"; then
# if we don't have system headers, use default SDK name (last resort)
if test -z "$SDKNAME" -a $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
SDKNAME=${SDKNAME:-macosx}
fi

if test -n "$SDKNAME"; then
# Call xcodebuild to determine SYSROOT
SYSROOT=`"$XCODEBUILD" -sdk $SDKNAME -version | grep '^Path: ' | sed 's/Path: //'`
fi
else
if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0; then
AC_MSG_ERROR([No xcodebuild tool and no system framework headers found, use --with-sysroot or --with-sdk-name to provide a path to a valid SDK])
fi
fi
else
# warn user if --with-sdk-name was also set
if test -n "$with_sdk_name"; then
AC_MSG_WARN([Both SYSROOT and --with-sdk-name are set, only SYSROOT will be used])
fi
fi

if test $HAVE_SYSTEM_FRAMEWORK_HEADERS -eq 0 -a -z "$SYSROOT"; then
# If no system framework headers, then SYSROOT must be set, or we won't build
AC_MSG_ERROR([Unable to determine SYSROOT and no headers found in /System/Library/Frameworks. Check Xcode configuration, --with-sysroot or --with-sdk-name arguments.])
fi

# Perform a basic sanity test
if test ! -f "$SYSROOT/System/Library/Frameworks/Foundation.framework/Headers/Foundation.h"; then
if test -z "$SYSROOT"; then
AC_MSG_ERROR([Unable to find required framework headers, provide a path to an SDK via --with-sysroot or --with-sdk-name and be sure Xcode is installed properly])
else
AC_MSG_ERROR([Invalid SDK or SYSROOT path, dependent framework headers not found])
fi
fi

# set SDKROOT too, Xcode tools will pick it up
AC_SUBST(SDKROOT,$SYSROOT)
fi

# Prepend the extra path to the global path
BASIC_PREPEND_TO_PATH([PATH],$EXTRA_PATH)
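For reference, the SDK probe that the new block performs can be run by hand; a minimal sketch, assuming Xcode and its default macosx SDK are installed (the printed path varies per Xcode release):

    # Print the SDK path that configure stores in SYSROOT/SDKROOT
    xcodebuild -sdk macosx -version | grep '^Path: ' | sed 's/Path: //'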
@@ -116,21 +116,25 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
AC_SUBST(RC_FLAGS)

if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
# FIXME: likely bug, should be CCXXFLAGS_JDK? or one for C or CXX.
CCXXFLAGS="$CCXXFLAGS -nologo"
# silence copyright notice and other headers.
COMMON_CCXXFLAGS="$COMMON_CCXXFLAGS -nologo"
fi

if test "x$SYSROOT" != "x"; then
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Solaris Studio does not have a concept of sysroot. Instead we must
# make sure the default include and lib dirs are appended to each
# make sure the default include and lib dirs are appended to each
# compile and link command line.
SYSROOT_CFLAGS="-I$SYSROOT/usr/include"
SYSROOT_LDFLAGS="-L$SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
fi
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Apple only wants -isysroot <path>, but we also need -iframework<path>/System/Library/Frameworks
SYSROOT_CFLAGS="-isysroot \"$SYSROOT\" -iframework\"$SYSROOT/System/Library/Frameworks\""
SYSROOT_LDFLAGS=$SYSROOT_CFLAGS
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
SYSROOT_CFLAGS="--sysroot=\"$SYSROOT\""
SYSROOT_LDFLAGS="--sysroot=\"$SYSROOT\""
@@ -143,6 +147,14 @@ AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
LEGACY_EXTRA_CXXFLAGS="$LEGACY_EXTRA_CXXFLAGS $SYSROOT_CFLAGS"
LEGACY_EXTRA_LDFLAGS="$LEGACY_EXTRA_LDFLAGS $SYSROOT_LDFLAGS"
fi

# These always need to be set, or we can't find the frameworks embedded in JavaVM.framework
# set this here so it doesn't have to be peppered throughout the forest
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
SYSROOT_CFLAGS="$SYSROOT_CFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
SYSROOT_LDFLAGS="$SYSROOT_LDFLAGS -F\"$SYSROOT/System/Library/Frameworks/JavaVM.framework/Frameworks\""
fi

AC_SUBST(SYSROOT_CFLAGS)
AC_SUBST(SYSROOT_LDFLAGS)
])
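To make the sysroot flags concrete: on a gcc toolchain the branch above turns a configured sysroot into compile and link arguments roughly like the following (hypothetical path, shown only as an illustration):

    # Equivalent of SYSROOT_CFLAGS / SYSROOT_LDFLAGS for --with-sysroot=/opt/arm-sysroot
    gcc --sysroot="/opt/arm-sysroot" -c HelloWorld.c -o HelloWorld.o
    gcc --sysroot="/opt/arm-sysroot" HelloWorld.o -o HelloWorld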
@@ -302,6 +314,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
# reduce from default "-g2" option to save space
CFLAGS_DEBUG_SYMBOLS="-g1"
CXXFLAGS_DEBUG_SYMBOLS="-g1"
else
@@ -313,6 +326,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
CXXFLAGS_DEBUG_SYMBOLS="-g"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CFLAGS_DEBUG_SYMBOLS="-g -xs"
# FIXME: likely a bug, this disables debug symbols rather than enables them
CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
CFLAGS_DEBUG_SYMBOLS="-g"
@@ -321,6 +335,31 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)

# bounds, memory and behavior checking options
if test "x$TOOLCHAIN_TYPE" = xgcc; then
case $DEBUG_LEVEL in
release )
# no adjustment
;;
fastdebug )
# Add compile time bounds checks.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
;;
slowdebug )
# Add runtime bounds checks and symbol info.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
fi
;;
esac
fi
AC_SUBST(CFLAGS_DEBUG_OPTIONS)
AC_SUBST(CXXFLAGS_DEBUG_OPTIONS)

# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CC_HIGHEST="$CC_HIGHEST -fns -fsimple -fsingle -xbuiltin=%all -xdepend -xrestrict -xlibmil"
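The debug options added here are standard GCC hardening flags and can be tried on any translation unit outside this build; a minimal sketch of what a slowdebug compile line gains (file name hypothetical):

    # slowdebug adds runtime bounds checks and stack protection to each native compile;
    # note _FORTIFY_SOURCE only takes effect when the compiler optimizes.
    gcc -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 \
        -fstack-protector-all --param ssp-buffer-size=1 \
        -c someNativeFile.c -o someNativeFile.o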
@ -330,10 +369,12 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
|
||||
C_O_FLAG_HIGHEST="-xO4 -Wu,-O4~yz $CC_HIGHEST -xalias_level=basic -xregs=no%frameptr"
|
||||
C_O_FLAG_HI="-xO4 -Wu,-O4~yz -xregs=no%frameptr"
|
||||
C_O_FLAG_NORM="-xO2 -Wu,-O2~yz -xregs=no%frameptr"
|
||||
C_O_FLAG_DEBUG="-xregs=no%frameptr"
|
||||
C_O_FLAG_NONE="-xregs=no%frameptr"
|
||||
CXX_O_FLAG_HIGHEST="-xO4 -Qoption ube -O4~yz $CC_HIGHEST -xregs=no%frameptr"
|
||||
CXX_O_FLAG_HI="-xO4 -Qoption ube -O4~yz -xregs=no%frameptr"
|
||||
CXX_O_FLAG_NORM="-xO2 -Qoption ube -O2~yz -xregs=no%frameptr"
|
||||
CXX_O_FLAG_DEBUG="-xregs=no%frameptr"
|
||||
CXX_O_FLAG_NONE="-xregs=no%frameptr"
|
||||
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
|
||||
C_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST -xchip=pentium"
|
||||
@ -343,10 +384,12 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
|
||||
C_O_FLAG_HIGHEST="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0 $CC_HIGHEST -xalias_level=basic -xprefetch=auto,explicit -xchip=ultra"
|
||||
C_O_FLAG_HI="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0"
|
||||
C_O_FLAG_NORM="-xO2 -Wc,-Qrm-s -Wc,-Qiselect-T0"
|
||||
C_O_FLAG_DEBUG=""
|
||||
C_O_FLAG_NONE=""
|
||||
CXX_O_FLAG_HIGHEST="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0 $CC_HIGHEST -xprefetch=auto,explicit -xchip=ultra"
|
||||
CXX_O_FLAG_HI="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0"
|
||||
CXX_O_FLAG_NORM="-xO2 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0"
|
||||
C_O_FLAG_DEBUG=""
|
||||
CXX_O_FLAG_NONE=""
|
||||
fi
|
||||
else
|
||||
@ -359,13 +402,17 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
|
||||
C_O_FLAG_HIGHEST="-Os"
|
||||
C_O_FLAG_HI="-Os"
|
||||
C_O_FLAG_NORM="-Os"
|
||||
C_O_FLAG_NONE=""
|
||||
else
|
||||
C_O_FLAG_HIGHEST="-O3"
|
||||
C_O_FLAG_HI="-O3"
|
||||
C_O_FLAG_NORM="-O2"
|
||||
C_O_FLAG_NONE="-O0"
|
||||
fi
|
||||
if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then
|
||||
C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG"
|
||||
else
|
||||
C_O_FLAG_DEBUG="-O0"
|
||||
fi
|
||||
C_O_FLAG_NONE="-O0"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xclang; then
|
||||
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
|
||||
# On MacOSX we optimize for size, something
|
||||
@ -373,37 +420,63 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
|
||||
C_O_FLAG_HIGHEST="-Os"
|
||||
C_O_FLAG_HI="-Os"
|
||||
C_O_FLAG_NORM="-Os"
|
||||
C_O_FLAG_NONE=""
|
||||
else
|
||||
C_O_FLAG_HIGHEST="-O3"
|
||||
C_O_FLAG_HI="-O3"
|
||||
C_O_FLAG_NORM="-O2"
|
||||
C_O_FLAG_NONE="-O0"
|
||||
fi
|
||||
C_O_FLAG_DEBUG="-O0"
|
||||
C_O_FLAG_NONE="-O0"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
|
||||
C_O_FLAG_HIGHEST="-O3"
|
||||
C_O_FLAG_HI="-O3 -qstrict"
|
||||
C_O_FLAG_NORM="-O2"
|
||||
C_O_FLAG_NONE=""
|
||||
C_O_FLAG_DEBUG="-qnoopt"
|
||||
C_O_FLAG_NONE="-qnoop"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
C_O_FLAG_HIGHEST="-O2"
|
||||
C_O_FLAG_HI="-O1"
|
||||
C_O_FLAG_NORM="-O1"
|
||||
C_O_FLAG_DEBUG="-Od"
|
||||
C_O_FLAG_NONE="-Od"
|
||||
fi
|
||||
CXX_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST"
|
||||
CXX_O_FLAG_HI="$C_O_FLAG_HI"
|
||||
CXX_O_FLAG_NORM="$C_O_FLAG_NORM"
|
||||
CXX_O_FLAG_DEBUG="$C_O_FLAG_DEBUG"
|
||||
CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
|
||||
fi
|
||||
|
||||
# Adjust optimization flags according to debug level.
|
||||
case $DEBUG_LEVEL in
|
||||
release )
|
||||
# no adjustment
|
||||
;;
|
||||
fastdebug )
|
||||
# Not quite so much optimization
|
||||
C_O_FLAG_HI="$C_O_FLAG_NORM"
|
||||
CXX_O_FLAG_HI="$CXX_O_FLAG_NORM"
|
||||
;;
|
||||
slowdebug )
|
||||
# Disable optimization
|
||||
C_O_FLAG_HIGHEST="$C_O_FLAG_DEBUG"
|
||||
C_O_FLAG_HI="$C_O_FLAG_DEBUG"
|
||||
C_O_FLAG_NORM="$C_O_FLAG_DEBUG"
|
||||
CXX_O_FLAG_HIGHEST="$CXX_O_FLAG_DEBUG"
|
||||
CXX_O_FLAG_HI="$CXX_O_FLAG_DEBUG"
|
||||
CXX_O_FLAG_NORM="$CXX_O_FLAG_DEBUG"
|
||||
;;
|
||||
esac
|
||||
|
||||
AC_SUBST(C_O_FLAG_HIGHEST)
|
||||
AC_SUBST(C_O_FLAG_HI)
|
||||
AC_SUBST(C_O_FLAG_NORM)
|
||||
AC_SUBST(C_O_FLAG_DEBUG)
|
||||
AC_SUBST(C_O_FLAG_NONE)
|
||||
AC_SUBST(CXX_O_FLAG_HIGHEST)
|
||||
AC_SUBST(CXX_O_FLAG_HI)
|
||||
AC_SUBST(CXX_O_FLAG_NORM)
|
||||
AC_SUBST(CXX_O_FLAG_DEBUG)
|
||||
AC_SUBST(CXX_O_FLAG_NONE)
|
||||
])
|
||||
|
||||
@ -461,11 +534,12 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
# Later we will also have CFLAGS and LDFLAGS for the hotspot subrepo build.
|
||||
#
|
||||
|
||||
# Setup compiler/platform specific flags to CFLAGS_JDK,
|
||||
# CXXFLAGS_JDK and CCXXFLAGS_JDK (common to C and CXX?)
|
||||
# Setup compiler/platform specific flags into
|
||||
# CFLAGS_JDK - C Compiler flags
|
||||
# CXXFLAGS_JDK - C++ Compiler flags
|
||||
# COMMON_CCXXFLAGS_JDK - common to C and C++
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
# these options are used for both C and C++ compiles
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \
|
||||
-pipe -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE"
|
||||
case $OPENJDK_TARGET_CPU_ARCH in
|
||||
arm )
|
||||
@ -477,31 +551,31 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
* )
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer"
|
||||
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
|
||||
;;
|
||||
esac
|
||||
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS"
|
||||
if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB"
|
||||
CFLAGS_JDK="$CFLAGS_JDK -erroff=E_BAD_PRAGMA_PACK_VALUE"
|
||||
fi
|
||||
|
||||
|
||||
CFLAGS_JDK="$CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal"
|
||||
CXXFLAGS_JDK="$CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
|
||||
CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
|
||||
CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \
|
||||
-D_STATIC_CPPLIB -D_DISABLE_DEPRECATE_STATIC_CPPLIB -DWIN32_LEAN_AND_MEAN \
|
||||
-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \
|
||||
-DWIN32 -DIAL"
|
||||
if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_AMD64_ -Damd64"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_AMD64_ -Damd64"
|
||||
else
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_X86_ -Dx86"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_X86_ -Dx86"
|
||||
fi
|
||||
fi
|
||||
|
||||
@ -509,28 +583,20 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
|
||||
# Adjust flags according to debug level.
|
||||
case $DEBUG_LEVEL in
|
||||
fastdebug )
|
||||
CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
|
||||
CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
|
||||
C_O_FLAG_HI="$C_O_FLAG_NORM"
|
||||
C_O_FLAG_NORM="$C_O_FLAG_NORM"
|
||||
CXX_O_FLAG_HI="$CXX_O_FLAG_NORM"
|
||||
CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM"
|
||||
fastdebug | slowdebug )
|
||||
CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS $CFLAGS_DEBUG_OPTIONS"
|
||||
CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS $CXXFLAGS_DEBUG_OPTIONS"
|
||||
JAVAC_FLAGS="$JAVAC_FLAGS -g"
|
||||
;;
|
||||
slowdebug )
|
||||
CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
|
||||
CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
|
||||
C_O_FLAG_HI="$C_O_FLAG_NONE"
|
||||
C_O_FLAG_NORM="$C_O_FLAG_NONE"
|
||||
CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
|
||||
CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
|
||||
JAVAC_FLAGS="$JAVAC_FLAGS -g"
|
||||
release )
|
||||
;;
|
||||
* )
|
||||
AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL])
|
||||
;;
|
||||
esac
|
||||
|
||||
# Setup LP64
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK $ADD_LP64"
|
||||
|
||||
# Set some common defines. These works for all compilers, but assume
|
||||
# -D is universally accepted.
|
||||
@ -543,74 +609,69 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
# Note: -Dmacro is the same as #define macro 1
|
||||
# -Dmacro= is the same as #define macro
|
||||
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN="
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN="
|
||||
else
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN"
|
||||
fi
|
||||
else
|
||||
# Same goes for _BIG_ENDIAN. Do we really need to set *ENDIAN on Solaris if they
|
||||
# are defined in the system?
|
||||
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN="
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN="
|
||||
else
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Setup target OS define. Use OS target name but in upper case.
|
||||
OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE"
|
||||
|
||||
# Setup target CPU
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY"
|
||||
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY"
|
||||
|
||||
# Setup debug/release defines
|
||||
if test "x$DEBUG_LEVEL" = xrelease; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DNDEBUG"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DNDEBUG"
|
||||
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DTRIMMED"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DTRIMMED"
|
||||
fi
|
||||
else
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DDEBUG"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DDEBUG"
|
||||
fi
|
||||
|
||||
# Setup release name
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'"
|
||||
|
||||
|
||||
# Set some additional per-OS defines.
|
||||
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
|
||||
elif test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
# FIXME: PPC64 should not be here.
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DPPC64"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DPPC64"
|
||||
elif test "x$OPENJDK_TARGET_OS" = xbsd; then
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
|
||||
fi
|
||||
|
||||
# Additional macosx handling
|
||||
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
# FIXME: This needs to be exported in spec.gmk due to closed legacy code.
|
||||
# FIXME: clean this up, and/or move it elsewhere.
|
||||
# Setting these parameters makes it an error to link to macosx APIs that are
|
||||
# newer than the given OS version and makes the linked binaries compatible
|
||||
# even if built on a newer version of the OS.
|
||||
# The expected format is X.Y.Z
|
||||
MACOSX_VERSION_MIN=10.7.0
|
||||
AC_SUBST(MACOSX_VERSION_MIN)
|
||||
|
||||
# Setting these parameters makes it an error to link to macosx APIs that are
|
||||
# newer than the given OS version and makes the linked binaries compatible
|
||||
# even if built on a newer version of the OS.
|
||||
# The expected format is X.Y.Z
|
||||
MACOSX_VERSION_MIN=10.7.0
|
||||
AC_SUBST(MACOSX_VERSION_MIN)
|
||||
|
||||
# The macro takes the version with no dots, ex: 1070
|
||||
# Let the flags variables get resolved in make for easier override on make
|
||||
# command line.
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
|
||||
fi
|
||||
# The macro takes the version with no dots, ex: 1070
|
||||
# Let the flags variables get resolved in make for easier override on make
|
||||
# command line.
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
|
||||
fi
|
||||
|
||||
# Setup some hard coded includes
|
||||
CCXXFLAGS_JDK="$CCXXFLAGS_JDK \
|
||||
COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK \
|
||||
-I${JDK_OUTPUTDIR}/include \
|
||||
-I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \
|
||||
-I${JDK_TOPDIR}/src/share/javavm/export \
|
||||
@ -619,12 +680,12 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
-I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common"
|
||||
|
||||
# The shared libraries are compiled using the picflag.
|
||||
CFLAGS_JDKLIB="$CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA"
|
||||
CXXFLAGS_JDKLIB="$CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA "
|
||||
CFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA"
|
||||
CXXFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA "
|
||||
|
||||
# Executable flags
|
||||
CFLAGS_JDKEXE="$CCXXFLAGS_JDK $CFLAGS_JDK"
|
||||
CXXFLAGS_JDKEXE="$CCXXFLAGS_JDK $CXXFLAGS_JDK"
|
||||
CFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK"
|
||||
CXXFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK"
|
||||
|
||||
AC_SUBST(CFLAGS_JDKLIB)
|
||||
AC_SUBST(CFLAGS_JDKEXE)
|
||||
@ -633,6 +694,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
|
||||
# Setup LDFLAGS et al.
|
||||
#
|
||||
|
||||
# Now this is odd. The JDK native libraries have to link against libjvm.so
|
||||
# On 32-bit machines there is normally two distinct libjvm.so:s, client and server.
|
||||
# Which should we link to? Are we lucky enough that the binary api to the libjvm.so library
|
||||
@ -648,39 +710,93 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
fi
|
||||
# TODO: make -debug optional "--disable-full-debug-symbols"
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK -debug"
|
||||
LDFLAGS_JDKLIB="${LDFLAGS_JDK} -dll -libpath:${JDK_OUTPUTDIR}/lib"
|
||||
LDFLAGS_JDKLIB_SUFFIX=""
|
||||
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
# If this is a --hash-style=gnu system, use --hash-style=both, why?
|
||||
# We have previously set HAS_GNU_HASH if this is the case
|
||||
if test -n "$HAS_GNU_HASH"; then
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both"
|
||||
fi
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
# And since we now know that the linker is gnu, then add -z defs, to forbid
|
||||
# undefined symbols in object files.
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs"
|
||||
case $DEBUG_LEVEL in
|
||||
release )
|
||||
# tell linker to optimize libraries.
|
||||
# Should this be supplied to the OSS linker as well?
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1"
|
||||
;;
|
||||
slowdebug )
|
||||
if test "x$HAS_LINKER_NOW" = "xtrue"; then
|
||||
# do relocations at load
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_NOW_FLAG"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_NOW_FLAG"
|
||||
fi
|
||||
if test "x$HAS_LINKER_RELRO" = "xtrue"; then
|
||||
# mark relocations read only
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG"
|
||||
fi
|
||||
;;
|
||||
fastdebug )
|
||||
if test "x$HAS_LINKER_RELRO" = "xtrue"; then
|
||||
# mark relocations read only
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG"
|
||||
fi
|
||||
;;
|
||||
* )
|
||||
AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL])
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
|
||||
fi
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
|
||||
# If undefined behaviour detection is enabled then we need to tell linker.
|
||||
case $DEBUG_LEVEL in
|
||||
release | fastdebug )
|
||||
;;
|
||||
slowdebug )
|
||||
AC_MSG_WARN([$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR])
|
||||
if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
|
||||
# enable undefined behaviour checking
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/[ ]*\([^ ]\+\)/ -Xlinker \1/g"`"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/[ ]*\([^ ]\+\)/ -Xlinker \1/g"`"
|
||||
fi
|
||||
;;
|
||||
* )
|
||||
AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL])
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Customize LDFLAGS for executables
|
||||
|
||||
LDFLAGS_JDKEXE="${LDFLAGS_JDK}"
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
|
||||
LDFLAGS_STACK_SIZE=1048576
|
||||
else
|
||||
LDFLAGS_STACK_SIZE=327680
|
||||
fi
|
||||
LDFLAGS_JDKEXE="${LDFLAGS_JDK} /STACK:$LDFLAGS_STACK_SIZE"
|
||||
LDFLAGS_JDKEXE="${LDFLAGS_JDKEXE} /STACK:$LDFLAGS_STACK_SIZE"
|
||||
elif test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined"
|
||||
fi
|
||||
|
||||
# Customize LDFLAGS for libs
|
||||
LDFLAGS_JDKLIB="${LDFLAGS_JDK}"
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -dll -libpath:${JDK_OUTPUTDIR}/lib"
|
||||
LDFLAGS_JDKLIB_SUFFIX=""
|
||||
else
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
# If this is a --hash-style=gnu system, use --hash-style=both, why?
|
||||
# We have previously set HAS_GNU_HASH if this is the case
|
||||
if test -n "$HAS_GNU_HASH"; then
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both "
|
||||
fi
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
# And since we now know that the linker is gnu, then add -z defs, to forbid
|
||||
# undefined symbols in object files.
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs"
|
||||
if test "x$DEBUG_LEVEL" = "xrelease"; then
|
||||
# When building release libraries, tell the linker optimize them.
|
||||
# Should this be supplied to the OSS linker as well?
|
||||
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
|
||||
LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
|
||||
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
|
||||
fi
|
||||
|
||||
LDFLAGS_JDKLIB="${LDFLAGS_JDK} $SHARED_LIBRARY_FLAGS \
|
||||
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS} \
|
||||
-L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}"
|
||||
|
||||
# On some platforms (mac) the linker warns about non existing -L dirs.
|
||||
@ -701,12 +817,8 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
|
||||
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
|
||||
LDFLAGS_JDKLIB_SUFFIX="$LDFLAGS_JDKLIB_SUFFIX -lc"
|
||||
fi
|
||||
|
||||
LDFLAGS_JDKEXE="${LDFLAGS_JDK}"
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined"
|
||||
fi
|
||||
fi
|
||||
|
||||
AC_SUBST(LDFLAGS_JDKLIB)
|
||||
AC_SUBST(LDFLAGS_JDKEXE)
|
||||
AC_SUBST(LDFLAGS_JDKLIB_SUFFIX)
|
||||
@@ -714,7 +826,6 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
AC_SUBST(LDFLAGS_CXX_JDK)
])


# FLAGS_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE],
# [RUN-IF-FALSE])
# ------------------------------------------------------------
@@ -727,7 +838,7 @@ AC_DEFUN([FLAGS_COMPILER_CHECK_ARGUMENTS],
saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS $1"
AC_LANG_PUSH([C])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
[supports=no])
AC_LANG_POP([C])
CFLAGS="$saved_cflags"
@@ -735,7 +846,7 @@ AC_DEFUN([FLAGS_COMPILER_CHECK_ARGUMENTS],
saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG $1"
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
[supports=no])
AC_LANG_POP([C++])
CXXFLAGS="$saved_cxxflags"
@@ -748,6 +859,31 @@ AC_DEFUN([FLAGS_COMPILER_CHECK_ARGUMENTS],
fi
])

# FLAGS_LINKER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE],
# [RUN-IF-FALSE])
# ------------------------------------------------------------
# Check that the linker support an argument
AC_DEFUN([FLAGS_LINKER_CHECK_ARGUMENTS],
[
AC_MSG_CHECKING([if linker supports "$1"])
supports=yes

saved_ldflags="$LDFLAGS"
LDFLAGS="$LDFLAGS $1"
AC_LANG_PUSH([C])
AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],
[], [supports=no])
AC_LANG_POP([C])
LDFLAGS="$saved_ldflags"

AC_MSG_RESULT([$supports])
if test "x$supports" = "xyes" ; then
m4_ifval([$2], [$2], [:])
else
m4_ifval([$3], [$3], [:])
fi
])

AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],
[
# Some Zero and Shark settings.
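FLAGS_LINKER_CHECK_ARGUMENTS mirrors the existing compiler check: append the candidate flag, attempt a trivial link, and report yes/no. Outside autoconf the same probe looks roughly like this plain-shell sketch (compiler name and flag are only examples):

    printf 'int main(void) { return 0; }\n' > conftest.c
    if cc conftest.c -Xlinker -z -Xlinker relro -o conftest 2>/dev/null; then
      echo "linker supports -z relro"
    else
      echo "linker does not support -z relro"
    fi
    rm -f conftest conftest.c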
File diff suppressed because it is too large
@@ -69,8 +69,8 @@ ISA_DIR=$(OPENJDK_TARGET_CPU_ISADIR)
# Yet another name for arch used for an extra subdir below the jvm lib.
# Uses i386 and amd64, instead of x86 and x86_64.
LIBARCH=$(OPENJDK_TARGET_CPU_LEGACY_LIB)
# Old name for OPENJDK_TARGET_CPU, uses i586 and amd64, instead of x86 and x86_64.
ARCH=$(OPENJDK_TARGET_CPU_LEGACY)
# Set the cpu architecture
ARCH=$(OPENJDK_TARGET_CPU_ARCH)
# Legacy setting for building for a 64 bit machine.
# If yes then this expands to _LP64:=1
@LP64@
@ -65,8 +65,6 @@ AC_DEFUN_ONCE([LIB_SETUP_INIT],
|
||||
ALSA_NOT_NEEDED=yes
|
||||
PULSE_NOT_NEEDED=yes
|
||||
X11_NOT_NEEDED=yes
|
||||
# If the java runtime framework is disabled, then we need X11.
|
||||
# This will be adjusted below.
|
||||
AC_MSG_RESULT([alsa pulse x11])
|
||||
fi
|
||||
|
||||
@ -83,20 +81,6 @@ AC_DEFUN_ONCE([LIB_SETUP_INIT],
|
||||
if test "x$SUPPORT_HEADFUL" = xno; then
|
||||
X11_NOT_NEEDED=yes
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
#
|
||||
# Check for MacOSX support for OpenJDK.
|
||||
#
|
||||
|
||||
BASIC_DEPRECATED_ARG_ENABLE(macosx-runtime-support, macosx_runtime_support)
|
||||
|
||||
AC_MSG_CHECKING([for Mac OS X Java Framework])
|
||||
if test -f /System/Library/Frameworks/JavaVM.framework/Frameworks/JavaRuntimeSupport.framework/Headers/JavaRuntimeSupport.h; then
|
||||
AC_MSG_RESULT([/System/Library/Frameworks/JavaVM.framework])
|
||||
else
|
||||
AC_MSG_RESULT([no])
|
||||
fi
|
||||
])
|
||||
|
||||
AC_DEFUN_ONCE([LIB_SETUP_X11],
|
||||
@@ -620,11 +604,36 @@ AC_DEFUN_ONCE([LIB_SETUP_MISC_LIBS],
# Check for the jpeg library
#

USE_EXTERNAL_LIBJPEG=true
AC_CHECK_LIB(jpeg, main, [],
[ USE_EXTERNAL_LIBJPEG=false
AC_MSG_NOTICE([Will use jpeg decoder bundled with the OpenJDK source])
])
AC_ARG_WITH(libjpeg, [AS_HELP_STRING([--with-libjpeg],
[use libjpeg from build system or OpenJDK source (system, bundled) @<:@bundled@:>@])])

AC_MSG_CHECKING([for which libjpeg to use])

# default is bundled
DEFAULT_LIBJPEG=bundled

#
# if user didn't specify, use DEFAULT_LIBJPEG
#
if test "x${with_libjpeg}" = "x"; then
with_libjpeg=${DEFAULT_LIBJPEG}
fi

AC_MSG_RESULT(${with_libjpeg})

if test "x${with_libjpeg}" = "xbundled"; then
USE_EXTERNAL_LIBJPEG=false
elif test "x${with_libjpeg}" = "xsystem"; then
AC_CHECK_HEADER(jpeglib.h, [],
[ AC_MSG_ERROR([--with-libjpeg=system specified, but jpeglib.h not found!])])
AC_CHECK_LIB(jpeg, jpeg_CreateDecompress, [],
[ AC_MSG_ERROR([--with-libjpeg=system specified, but no libjpeg found])])

USE_EXTERNAL_LIBJPEG=true
else
AC_MSG_ERROR([Invalid use of --with-libjpeg: ${with_libjpeg}, use 'system' or 'bundled'])
fi

AC_SUBST(USE_EXTERNAL_LIBJPEG)

###############################################################################
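With this hunk the libjpeg choice becomes an explicit configure option instead of an automatic library probe. Typical invocations, using only the option defined above:

    # Link against the jpeg library installed on the build system
    bash ./configure --with-libjpeg=system

    # Keep the decoder bundled with the OpenJDK sources (the default)
    bash ./configure --with-libjpeg=bundled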
@@ -84,7 +84,7 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
sparcv9)
sparcv9|sparc64)
VAR_CPU=sparcv9
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=64
@@ -347,6 +347,9 @@ CPP:=@FIXPATH@ @CPP@
# The linker can be gcc or ld on posix systems, or link.exe on windows systems.
LD:=@FIXPATH@ @LD@

# Xcode SDK path
SDKROOT:=@SDKROOT@

# The linker on older SuSE distros (e.g. on SLES 10) complains with:
# "Invalid version tag `SUNWprivate_1.1'. Only anonymous version tag is allowed in executable."
# if feeded with a version script which contains named tags.
@@ -544,7 +547,7 @@ SETFILE:=@SETFILE@
XATTR:=@XATTR@
JT_HOME:=@JT_HOME@
JTREGEXE:=@JTREGEXE@

XCODEBUILD=@XCODEBUILD@
FIXPATH:=@FIXPATH@

# Where the build output is stored for your convenience.
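SDKROOT here simply exports the sysroot chosen by configure; Apple's command-line tool wrappers generally honor it as an environment variable when no explicit -isysroot is passed. A hypothetical illustration (the SDK path is an example, not a value from this change):

    export SDKROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
    xcrun clang -c foo.c -o foo.o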
@ -24,11 +24,11 @@
|
||||
#
|
||||
|
||||
########################################################################
|
||||
# This file is responsible for detecting, verifying and setting up the
|
||||
# toolchain, i.e. the compiler, linker and related utilities. It will setup
|
||||
# This file is responsible for detecting, verifying and setting up the
|
||||
# toolchain, i.e. the compiler, linker and related utilities. It will setup
|
||||
# proper paths to the binaries, but it will not setup any flags.
|
||||
#
|
||||
# The binaries used is determined by the toolchain type, which is the family of
|
||||
# The binaries used is determined by the toolchain type, which is the family of
|
||||
# compilers and related tools that are used.
|
||||
########################################################################
|
||||
|
||||
@ -83,7 +83,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_FILENAME_PATTERNS],
|
||||
AC_SUBST(SHARED_LIBRARY)
|
||||
AC_SUBST(STATIC_LIBRARY)
|
||||
AC_SUBST(OBJ_SUFFIX)
|
||||
AC_SUBST(EXE_SUFFIX)
|
||||
AC_SUBST(EXE_SUFFIX)
|
||||
])
|
||||
|
||||
# Determine which toolchain type to use, and make sure it is valid for this
|
||||
@@ -98,26 +98,33 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE],
VALID_TOOLCHAINS=${!toolchain_var_name}

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to determine Xcode version.])
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \(@<:@1-9@:>@@<:@0-9.@:>@*\)/\1/' | \
$CUT -f 1 -d .`
AC_MSG_NOTICE([Xcode major version: $XCODE_MAJOR_VERSION])
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
if test -n "$XCODEBUILD"; then
# On Mac OS X, default toolchain to clang after Xcode 5
XCODE_VERSION_OUTPUT=`"$XCODEBUILD" -version 2>&1 | $HEAD -n 1`
$ECHO "$XCODE_VERSION_OUTPUT" | $GREP "Xcode " > /dev/null
if test $? -ne 0; then
AC_MSG_ERROR([Failed to determine Xcode version.])
fi
XCODE_MAJOR_VERSION=`$ECHO $XCODE_VERSION_OUTPUT | \
$SED -e 's/^Xcode \(@<:@1-9@:>@@<:@0-9.@:>@*\)/\1/' | \
$CUT -f 1 -d .`
AC_MSG_NOTICE([Xcode major version: $XCODE_MAJOR_VERSION])
if test $XCODE_MAJOR_VERSION -ge 5; then
DEFAULT_TOOLCHAIN="clang"
else
DEFAULT_TOOLCHAIN="gcc"
fi
else
DEFAULT_TOOLCHAIN="gcc"
# If Xcode is not installed, but the command line tools are
# then we can't run xcodebuild. On these systems we should
# default to clang
DEFAULT_TOOLCHAIN="clang"
fi
else
# First toolchain type in the list is the default
DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *}
fi


if test "x$with_toolchain_type" = xlist; then
# List all toolchains
AC_MSG_NOTICE([The following toolchains are valid on this platform:])
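The version handling in this hunk reduces to pulling the major number out of the first line of xcodebuild -version. Stripped of autoconf quoting (the @<:@ ... @:>@ quadrigraphs are just [ and ]), the pipeline is roughly:

    # First line is e.g. "Xcode 5.1.1"
    XCODE_VERSION_OUTPUT=`xcodebuild -version 2>&1 | head -n 1`
    XCODE_MAJOR_VERSION=`echo $XCODE_VERSION_OUTPUT | sed -e 's/^Xcode \([1-9][0-9.]*\)/\1/' | cut -f 1 -d .`
    echo "Xcode major version: $XCODE_MAJOR_VERSION"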
@ -126,7 +133,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE],
|
||||
TOOLCHAIN_DESCRIPTION=${!toolchain_var_name}
|
||||
$PRINTF " %-10s %s\n" $toolchain "$TOOLCHAIN_DESCRIPTION"
|
||||
done
|
||||
|
||||
|
||||
exit 0
|
||||
elif test "x$with_toolchain_type" != x; then
|
||||
# User override; check that it is valid
|
||||
@ -168,10 +175,10 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE],
|
||||
AC_MSG_NOTICE([Using default toolchain $TOOLCHAIN_TYPE ($TOOLCHAIN_DESCRIPTION)])
|
||||
else
|
||||
AC_MSG_NOTICE([Using user selected toolchain $TOOLCHAIN_TYPE ($TOOLCHAIN_DESCRIPTION). Default toolchain is $DEFAULT_TOOLCHAIN.])
|
||||
fi
|
||||
fi
|
||||
])
|
||||
|
||||
# Before we start detecting the toolchain executables, we might need some
|
||||
# Before we start detecting the toolchain executables, we might need some
|
||||
# special setup, e.g. additional paths etc.
|
||||
AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
|
||||
[
|
||||
@ -184,7 +191,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
|
||||
ORG_OBJCFLAGS="$OBJCFLAGS"
|
||||
|
||||
# On Windows, we need to detect the visual studio installation first.
|
||||
# This will change the PATH, but we need to keep that new PATH even
|
||||
# This will change the PATH, but we need to keep that new PATH even
|
||||
# after toolchain detection is done, since the compiler (on x86) uses
|
||||
# it for DLL resolution in runtime.
|
||||
if test "x$OPENJDK_BUILD_OS" = "xwindows" && test "x$TOOLCHAIN_TYPE" = "xmicrosoft"; then
|
||||
@ -208,7 +215,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
|
||||
PATH="/usr/ccs/bin:$PATH"
|
||||
fi
|
||||
|
||||
# Finally add TOOLCHAIN_PATH at the beginning, to allow --with-tools-dir to
|
||||
# Finally add TOOLCHAIN_PATH at the beginning, to allow --with-tools-dir to
|
||||
# override all other locations.
|
||||
if test "x$TOOLCHAIN_PATH" != x; then
|
||||
PATH=$TOOLCHAIN_PATH:$PATH
|
||||
@ -254,7 +261,7 @@ AC_DEFUN([TOOLCHAIN_CHECK_COMPILER_VERSION],
|
||||
AC_MSG_NOTICE([The result from running with --version was: "$ALT_VERSION_OUTPUT"])
|
||||
AC_MSG_ERROR([A $TOOLCHAIN_TYPE compiler is required. Try setting --with-tools-dir.])
|
||||
fi
|
||||
# Remove usage instructions (if present), and
|
||||
# Remove usage instructions (if present), and
|
||||
# collapse compiler output into a single line
|
||||
COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
|
||||
$SED -e 's/ *@<:@Uu@:>@sage:.*//'`
|
||||
@ -282,7 +289,7 @@ AC_DEFUN([TOOLCHAIN_CHECK_COMPILER_VERSION],
|
||||
# There is no specific version flag, but all output starts with a version string.
|
||||
# First line typically looks something like:
|
||||
# Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
|
||||
COMPILER_VERSION_OUTPUT=`$COMPILER 2>&1 | $HEAD -n 1 | $TR -d '\r'`
|
||||
COMPILER_VERSION_OUTPUT=`$COMPILER 2>&1 | $HEAD -n 1 | $TR -d '\r'`
|
||||
# Check that this is likely to be Microsoft CL.EXE.
|
||||
$ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Microsoft.*Compiler" > /dev/null
|
||||
if test $? -ne 0; then
|
||||
@ -360,7 +367,7 @@ AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
|
||||
AC_MSG_NOTICE([Will use user supplied compiler $1=[$]$1])
|
||||
if test "x`basename [$]$1`" = "x[$]$1"; then
|
||||
# A command without a complete path is provided, search $PATH.
|
||||
|
||||
|
||||
AC_PATH_PROGS(POTENTIAL_$1, [$]$1)
|
||||
if test "x$POTENTIAL_$1" != x; then
|
||||
$1=$POTENTIAL_$1
|
||||
@ -375,12 +382,12 @@ AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
|
||||
fi
|
||||
else
|
||||
# No user supplied value. Locate compiler ourselves.
|
||||
|
||||
|
||||
# If we are cross compiling, assume cross compilation tools follows the
|
||||
# cross compilation standard where they are prefixed with the autoconf
|
||||
# standard name for the target. For example the binary
|
||||
# standard name for the target. For example the binary
|
||||
# i686-sun-solaris2.10-gcc will cross compile for i686-sun-solaris2.10.
|
||||
# If we are not cross compiling, then the default compiler name will be
|
||||
# If we are not cross compiling, then the default compiler name will be
|
||||
# used.
|
||||
|
||||
$1=
|
||||
@ -450,9 +457,9 @@ AC_DEFUN([TOOLCHAIN_FIND_COMPILER],
|
||||
TOOLCHAIN_CHECK_COMPILER_VERSION([$1], [$COMPILER_NAME])
|
||||
])
|
||||
|
||||
# Detect the core components of the toolchain, i.e. the compilers (CC and CXX),
|
||||
# preprocessor (CPP and CXXCPP), the linker (LD), the assembler (AS) and the
|
||||
# archiver (AR). Verify that the compilers are correct according to the
|
||||
# Detect the core components of the toolchain, i.e. the compilers (CC and CXX),
|
||||
# preprocessor (CPP and CXXCPP), the linker (LD), the assembler (AS) and the
|
||||
# archiver (AR). Verify that the compilers are correct according to the
|
||||
# toolchain type.
|
||||
AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
|
||||
[
|
||||
@ -529,7 +536,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
|
||||
])
|
||||
|
||||
# Setup additional tools that is considered a part of the toolchain, but not the
|
||||
# core part. Many of these are highly platform-specific and do not exist,
|
||||
# core part. Many of these are highly platform-specific and do not exist,
|
||||
# and/or are not needed on all platforms.
|
||||
AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
|
||||
[
|
||||
@ -551,7 +558,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
|
||||
AC_CHECK_PROG([DUMPBIN], [dumpbin], [dumpbin],,,)
|
||||
BASIC_FIXUP_EXECUTABLE(DUMPBIN)
|
||||
fi
|
||||
|
||||
|
||||
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
|
||||
BASIC_PATH_PROGS(STRIP, strip)
|
||||
BASIC_FIXUP_EXECUTABLE(STRIP)
|
||||
@ -559,7 +566,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
|
||||
BASIC_FIXUP_EXECUTABLE(NM)
|
||||
BASIC_PATH_PROGS(GNM, gnm)
|
||||
BASIC_FIXUP_EXECUTABLE(GNM)
|
||||
|
||||
|
||||
BASIC_PATH_PROGS(MCS, mcs)
|
||||
BASIC_FIXUP_EXECUTABLE(MCS)
|
||||
elif test "x$OPENJDK_TARGET_OS" != xwindows; then
|
||||
@ -592,17 +599,17 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA],
|
||||
|
||||
# Setup the build tools (i.e, the compiler and linker used to build programs
|
||||
# that should be run on the build platform, not the target platform, as a build
|
||||
# helper). Since the non-cross-compile case uses the normal, target compilers
|
||||
# helper). Since the non-cross-compile case uses the normal, target compilers
|
||||
# for this, we can only do this after these have been setup.
|
||||
AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
|
||||
[
|
||||
[
|
||||
if test "x$COMPILE_TYPE" = "xcross"; then
|
||||
# Now we need to find a C/C++ compiler that can build executables for the
|
||||
# build platform. We can't use the AC_PROG_CC macro, since it can only be
|
||||
# used once. Also, we need to do this without adding a tools dir to the
|
||||
# path, otherwise we might pick up cross-compilers which don't use standard
|
||||
# naming.
|
||||
|
||||
|
||||
# FIXME: we should list the discovered compilers as an exclude pattern!
|
||||
# If we do that, we can do this detection before POST_DETECTION, and still
|
||||
# find the build compilers in the tools dir, if needed.
|
||||
@ -690,15 +697,39 @@ AC_DEFUN_ONCE([TOOLCHAIN_MISC_CHECKS],
|
||||
# If this is a --hash-style=gnu system, use --hash-style=both, why?
|
||||
HAS_GNU_HASH=`$CC -dumpspecs 2>/dev/null | $GREP 'hash-style=gnu'`
|
||||
# This is later checked when setting flags.
|
||||
|
||||
# "-Og" suppported for GCC 4.8 and later
|
||||
CFLAG_OPTIMIZE_DEBUG_FLAG="-Og"
|
||||
FLAGS_COMPILER_CHECK_ARGUMENTS([$CFLAG_OPTIMIZE_DEBUG_FLAG],
|
||||
[HAS_CFLAG_OPTIMIZE_DEBUG=true],
|
||||
[HAS_CFLAG_OPTIMIZE_DEBUG=false])
|
||||
|
||||
# "-fsanitize=undefined" supported for GCC 4.9 and later
|
||||
CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG="-fsanitize=undefined -fsanitize-recover"
|
||||
FLAGS_COMPILER_CHECK_ARGUMENTS([$CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG],
|
||||
[HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=true],
|
||||
[HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=false])
|
||||
|
||||
# "-z relro" supported in GNU binutils 2.17 and later
|
||||
LINKER_RELRO_FLAG="-Xlinker -z -Xlinker relro"
|
||||
FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_RELRO_FLAG],
|
||||
[HAS_LINKER_RELRO=true],
|
||||
[HAS_LINKER_RELRO=false])
|
||||
|
||||
# "-z now" supported in GNU binutils 2.11 and later
|
||||
LINKER_NOW_FLAG="-Xlinker -z -Xlinker now"
|
||||
FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_NOW_FLAG],
|
||||
[HAS_LINKER_NOW=true],
|
||||
[HAS_LINKER_NOW=false])
|
||||
fi
|
||||
|
||||
# Check for broken SuSE 'ld' for which 'Only anonymous version tag is allowed
|
||||
# Check for broken SuSE 'ld' for which 'Only anonymous version tag is allowed
|
||||
# in executable.'
|
||||
USING_BROKEN_SUSE_LD=no
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
AC_MSG_CHECKING([for broken SuSE 'ld' which only understands anonymous version tags in executables])
|
||||
echo "SUNWprivate_1.1 { local: *; };" > version-script.map
|
||||
echo "int main() { }" > main.c
|
||||
$ECHO "SUNWprivate_1.1 { local: *; };" > version-script.map
|
||||
$ECHO "int main() { }" > main.c
|
||||
if $CXX -Xlinker -version-script=version-script.map main.c 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD; then
|
||||
AC_MSG_RESULT(no)
|
||||
USING_BROKEN_SUSE_LD=no
|
||||
|
@@ -261,3 +261,7 @@ e54022d0dd92106fff7f7fe670010cd7e6517ee3 jdk9-b15
422ef9d29d84f571453f015c4cb8713c3af70ee4 jdk9-b16
4c75c2ca7cf3e0618315879acf17f42c8fcd0c09 jdk9-b17
77565aaaa2bb814e94817e92d680168052a25395 jdk9-b18
eecc1b6adc7e193d00a0641eb0963add5a4c06e8 jdk9-b19
87f36eecb1665012d01c5cf102494e591c943ea6 jdk9-b20
3615a4e7f0542ca7552ad6454b742c73ee211d8e jdk9-b21
ddc07abf4307855c0dc904cc5c96cc764023a930 jdk9-b22
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -35,7 +35,8 @@ import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.Hashtable;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
|
||||
import org.omg.CORBA.INTERNAL;
|
||||
|
||||
@ -49,7 +50,7 @@ public abstract class OutputStreamHook extends ObjectOutputStream
|
||||
*/
|
||||
private class HookPutFields extends ObjectOutputStream.PutField
|
||||
{
|
||||
private Hashtable fields = new Hashtable();
|
||||
private Map<String,Object> fields = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Put the value of the named boolean field into the persistent field.
|
||||
@ -140,7 +141,6 @@ public abstract class OutputStreamHook extends ObjectOutputStream
|
||||
public OutputStreamHook()
|
||||
throws java.io.IOException {
|
||||
super();
|
||||
|
||||
}
|
||||
|
||||
public void defaultWriteObject() throws IOException {
|
||||
|
@@ -1,7 +1,7 @@
#!/bin/sh

#
# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,76 @@
# questions.
#

# Get clones of all nested repositories
sh ./common/bin/hgforest.sh clone "$@" || exit 1
to_stderr() {
echo "$@" >&2
}

error() {
to_stderr "ERROR: $1"
exit ${2:-126}
}

warning() {
to_stderr "WARNING: $1"
}

version_field() {
# rev is typically omitted for minor and major releases
field=`echo ${1}.0 | cut -f ${2} -d .`
if expr 1 + $field >/dev/null 2> /dev/null; then
echo $field
else
echo -1
fi
}

# Version check

# required
reqdmajor=1
reqdminor=4
reqdrev=0

# requested
rqstmajor=2
rqstminor=6
rqstrev=3


# installed
hgwhere="`command -v hg`"
if [ "x$hgwhere" = "x" ]; then
error "Could not locate Mercurial command"
fi

hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
if [ "x${hgversion}" = "x" ] ; then
error "Could not determine Mercurial version of $hgwhere"
fi

hgmajor="`version_field $hgversion 1`"
hgminor="`version_field $hgversion 2`"
hgrev="`version_field $hgversion 3`"

if [ $hgmajor -eq -1 -o $hgminor -eq -1 -o $hgrev -eq -1 ] ; then
error "Could not determine Mercurial version of $hgwhere from \"$hgversion\""
fi


# Require
if [ $hgmajor -lt $reqdmajor -o \( $hgmajor -eq $reqdmajor -a $hgminor -lt $reqdminor \) -o \( $hgmajor -eq $reqdmajor -a $hgminor -eq $reqdminor -a $hgrev -lt $reqdrev \) ] ; then
error "Mercurial version $reqdmajor.$reqdminor.$reqdrev or later is required. $hgwhere is version $hgversion"
fi


# Request
if [ $hgmajor -lt $rqstmajor -o \( $hgmajor -eq $rqstmajor -a $hgminor -lt $rqstminor \) -o \( $hgmajor -eq $rqstmajor -a $hgminor -eq $rqstminor -a $hgrev -lt $rqstrev \) ] ; then
warning "Mercurial version $rqstmajor.$rqstminor.$rqstrev or later is recommended. $hgwhere is version $hgversion"
fi


# Get clones of all absent nested repositories (harmless if already exist)
sh ./common/bin/hgforest.sh clone "$@" || exit $?

# Update all existing repositories to the latest sources
sh ./common/bin/hgforest.sh pull -u
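The rewritten get_source.sh now sanity-checks the installed Mercurial before touching the forest, but its use from a top-level checkout is unchanged; extra arguments are still passed straight through to hgforest.sh clone:

    # Verify hg, clone any missing sub-repositories, then pull -u everything
    sh ./get_source.sh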
@@ -421,3 +421,7 @@ bd333491bb6c012d7b606939406d0fa9a5ac7ffd jdk9-b14
b14e7c0b7d3ec04127f565cda1d84122e205680c jdk9-b16
14b656df31c2cb09c505921061e79977823de71a jdk9-b17
871fd128548480095e0dc3fc34c422666baeec75 jdk9-b18
d4cffb3ae6213c66c7522ebffe0349360a45f0ef jdk9-b19
c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
17b4a5e831b398738feedb0afe75245744510153 jdk9-b21
518d1fcc0799494f013e00e0a94a91b6f212d54f jdk9-b22
@ -24,23 +24,26 @@
|
||||
|
||||
package sun.jvm.hotspot.gc_implementation.g1;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Observable;
|
||||
import java.util.Observer;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.memory.ContiguousSpace;
|
||||
import sun.jvm.hotspot.memory.CompactibleSpace;
|
||||
import sun.jvm.hotspot.memory.MemRegion;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.types.AddressField;
|
||||
import sun.jvm.hotspot.types.CIntegerField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
|
||||
// Mirror class for HeapRegion. Currently we don't actually include
|
||||
// any of its fields but only iterate over it (which we get "for free"
|
||||
// as HeapRegion ultimately inherits from ContiguousSpace).
|
||||
// any of its fields but only iterate over it.
|
||||
|
||||
public class HeapRegion extends ContiguousSpace {
|
||||
public class HeapRegion extends CompactibleSpace {
|
||||
// static int GrainBytes;
|
||||
static private CIntegerField grainBytesField;
|
||||
static private AddressField topField;
|
||||
|
||||
static {
|
||||
VM.registerVMInitializedObserver(new Observer() {
|
||||
@ -54,6 +57,8 @@ public class HeapRegion extends ContiguousSpace {
|
||||
Type type = db.lookupType("HeapRegion");
|
||||
|
||||
grainBytesField = type.getCIntegerField("GrainBytes");
|
||||
topField = type.getAddressField("_top");
|
||||
|
||||
}
|
||||
|
||||
static public long grainBytes() {
|
||||
@ -63,4 +68,25 @@ public class HeapRegion extends ContiguousSpace {
|
||||
public HeapRegion(Address addr) {
|
||||
super(addr);
|
||||
}
|
||||
|
||||
public Address top() {
|
||||
return topField.getValue(addr);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List getLiveRegions() {
|
||||
List res = new ArrayList();
|
||||
res.add(new MemRegion(bottom(), top()));
|
||||
return res;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long used() {
|
||||
return top().minus(bottom());
|
||||
}
|
||||
|
||||
@Override
|
||||
public long free() {
|
||||
return end().minus(top());
|
||||
}
|
||||
}
|
||||
|
@ -280,16 +280,7 @@ endif

# optimization control flags (Used by fastdebug and release variants)
OPT_CFLAGS/NOOPT=-O0
ifeq ($(USE_CLANG), true)
  # Clang does not support -Og
  OPT_CFLAGS/DEBUG=-O0
else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
  # Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
  OPT_CFLAGS/DEBUG=-Og
else
  # Allow no optimizations.
  OPT_CFLAGS/DEBUG=-O0
endif
OPT_CFLAGS/DEBUG=-O0
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3

@ -457,16 +448,8 @@ ifeq ($(USE_CLANG), true)
CFLAGS += -flimit-debug-info
endif

ifeq ($(USE_CLANG), true)
  # Clang does not support -Og
  DEBUG_CFLAGS=-O0
else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
  # Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
  DEBUG_CFLAGS=-Og
else
  # Allow no optimizations.
  DEBUG_CFLAGS=-O0
endif
# Allow no optimizations.
DEBUG_CFLAGS=-O0

# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)
@ -93,6 +93,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
  ageTable.cpp \
  collectorCounters.cpp \
  cSpaceCounters.cpp \
  gcId.cpp \
  gcPolicyCounters.cpp \
  gcStats.cpp \
  gcTimer.cpp \
@ -350,21 +350,25 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
    ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
    ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests

jprt.make.rule.test.targets.standard.wbapi = \
    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
    ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
    ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
jprt.make.rule.test.targets.standard.reg.group = \
    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
    ${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
    ${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP

jprt.make.rule.test.targets.standard = \
    ${jprt.make.rule.test.targets.standard.client}, \
    ${jprt.make.rule.test.targets.standard.server}, \
    ${jprt.make.rule.test.targets.standard.internalvmtests}, \
    ${jprt.make.rule.test.targets.standard.wbapi}
    ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_wbapitest}, \
    ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
    ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
    ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
    ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}

jprt.make.rule.test.targets.embedded = \
    ${jprt.make.rule.test.targets.standard.client}
@ -40,7 +40,14 @@ else
ifneq ($(ALT_SDT_H),)
  SDT_H_FILE = $(ALT_SDT_H)
else
  SDT_H_FILE = /usr/include/sys/sdt.h
  ifeq ($(USE_CLANG), true)
    # Clang doesn't support the -print-sysroot option and there is no known equivalent
    # option, so fall back to using / as sysroot
    SDT_SYSROOT=
  else
    SDT_SYSROOT=$(shell $(CXX) -print-sysroot)
  endif
  SDT_H_FILE = $(SDT_SYSROOT)/usr/include/sys/sdt.h
endif

DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE))
@ -231,13 +231,7 @@ CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
|
||||
|
||||
# optimization control flags (Used by fastdebug and release variants)
|
||||
OPT_CFLAGS/NOOPT=-O0
|
||||
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
|
||||
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
|
||||
OPT_CFLAGS/DEBUG=-Og
|
||||
else
|
||||
# Allow no optimizations.
|
||||
OPT_CFLAGS/DEBUG=-O0
|
||||
endif
|
||||
OPT_CFLAGS/DEBUG=-O0
|
||||
OPT_CFLAGS/SIZE=-Os
|
||||
OPT_CFLAGS/SPEED=-O3
|
||||
|
||||
@ -344,13 +338,8 @@ ifeq ($(USE_CLANG), true)
|
||||
CFLAGS += -flimit-debug-info
|
||||
endif
|
||||
|
||||
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
|
||||
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
|
||||
DEBUG_CFLAGS=-Og
|
||||
else
|
||||
# Allow no optimizations.
|
||||
DEBUG_CFLAGS=-O0
|
||||
endif
|
||||
# Allow no optimizations.
|
||||
DEBUG_CFLAGS=-O0
|
||||
|
||||
# DEBUG_BINARIES uses full -g debug information for all configs
|
||||
ifeq ($(DEBUG_BINARIES), true)
|
||||
|
@ -127,13 +127,7 @@ CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
|
||||
|
||||
# optimization control flags (Used by fastdebug and release variants)
|
||||
OPT_CFLAGS/NOOPT=-O0
|
||||
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
|
||||
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
|
||||
OPT_CFLAGS/DEBUG=-Og
|
||||
else
|
||||
# Allow no optimizations.
|
||||
OPT_CFLAGS/DEBUG=-O0
|
||||
endif
|
||||
OPT_CFLAGS/DEBUG=-O0
|
||||
OPT_CFLAGS/SIZE=-Os
|
||||
OPT_CFLAGS/SPEED=-O3
|
||||
|
||||
@ -229,14 +223,8 @@ SHARED_FLAG = -shared
|
||||
#------------------------------------------------------------------------
|
||||
# Debug flags
|
||||
|
||||
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
|
||||
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
|
||||
DEBUG_CFLAGS=-Og
|
||||
else
|
||||
# Allow no optimizations.
|
||||
DEBUG_CFLAGS=-O0
|
||||
endif
|
||||
|
||||
# Allow no optimizations.
|
||||
DEBUG_CFLAGS=-O0
|
||||
|
||||
# Use the stabs format for debugging information (this is the default
|
||||
# on gcc-2.91). It's good enough, has all the information about line
|
||||
|
@ -2809,12 +2809,10 @@ bool os::dont_yield() {
  return DontYieldALot;
}

void os::yield() {
void os::naked_yield() {
  sched_yield();
}

os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }

////////////////////////////////////////////////////////////////////////////////
// thread priority support
@ -3071,7 +3069,7 @@ static bool do_suspend(OSThread* osthread) {
|
||||
|
||||
for (int n = 0; !osthread->sr.is_suspended(); n++) {
|
||||
for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
|
||||
os::yield();
|
||||
os::naked_yield();
|
||||
}
|
||||
|
||||
// timeout, try to cancel the request
|
||||
@ -3105,7 +3103,7 @@ static void do_resume(OSThread* osthread) {
|
||||
if (sr_notify(osthread) == 0) {
|
||||
for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
|
||||
for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
|
||||
os::yield();
|
||||
os::naked_yield();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -2596,12 +2596,10 @@ bool os::dont_yield() {
|
||||
return DontYieldALot;
|
||||
}
|
||||
|
||||
void os::yield() {
|
||||
void os::naked_yield() {
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// thread priority support
|
||||
|
||||
@ -4218,22 +4216,12 @@ static struct timespec* compute_abstime(struct timespec* abstime, jlong millis)
|
||||
return abstime;
|
||||
}
|
||||
|
||||
|
||||
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
|
||||
// Conceptually TryPark() should be equivalent to park(0).
|
||||
|
||||
int os::PlatformEvent::TryPark() {
|
||||
for (;;) {
|
||||
const int v = _Event;
|
||||
guarantee((v == 0) || (v == 1), "invariant");
|
||||
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
|
||||
}
|
||||
}
|
||||
|
||||
void os::PlatformEvent::park() { // AKA "down()"
|
||||
// Invariant: Only the thread associated with the Event/PlatformEvent
|
||||
// may call park().
|
||||
// TODO: assert that _Assoc != NULL or _Assoc == Self
|
||||
assert(_nParked == 0, "invariant");
|
||||
|
||||
int v;
|
||||
for (;;) {
|
||||
v = _Event;
|
||||
@ -4333,8 +4321,7 @@ void os::PlatformEvent::unpark() {
|
||||
// 1 :=> 1
|
||||
// -1 :=> either 0 or 1; must signal target thread
|
||||
// That is, we can safely transition _Event from -1 to either
|
||||
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
|
||||
// unpark() calls.
|
||||
// 0 or 1.
|
||||
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
||||
//
|
||||
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
||||
@ -4541,10 +4528,9 @@ void Parker::park(bool isAbsolute, jlong time) {
|
||||
}
|
||||
|
||||
void Parker::unpark() {
|
||||
int s, status;
|
||||
status = pthread_mutex_lock(_mutex);
|
||||
int status = pthread_mutex_lock(_mutex);
|
||||
assert(status == 0, "invariant");
|
||||
s = _counter;
|
||||
const int s = _counter;
|
||||
_counter = 1;
|
||||
if (s < 1) {
|
||||
if (WorkAroundNPTLTimedWaitHang) {
|
||||
|
@ -219,7 +219,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
|
||||
int fired() { return _Event; }
|
||||
void park();
|
||||
void unpark();
|
||||
int TryPark();
|
||||
int park(jlong millis);
|
||||
void SetAssociation(Thread * a) { _Assoc = a; }
|
||||
};
|
||||
|
@ -3791,12 +3791,10 @@ bool os::dont_yield() {
|
||||
return DontYieldALot;
|
||||
}
|
||||
|
||||
void os::yield() {
|
||||
void os::naked_yield() {
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// thread priority support
|
||||
|
||||
@ -5457,22 +5455,12 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
|
||||
return abstime;
|
||||
}
|
||||
|
||||
|
||||
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
|
||||
// Conceptually TryPark() should be equivalent to park(0).
|
||||
|
||||
int os::PlatformEvent::TryPark() {
|
||||
for (;;) {
|
||||
const int v = _Event;
|
||||
guarantee((v == 0) || (v == 1), "invariant");
|
||||
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
|
||||
}
|
||||
}
|
||||
|
||||
void os::PlatformEvent::park() { // AKA "down()"
|
||||
// Invariant: Only the thread associated with the Event/PlatformEvent
|
||||
// may call park().
|
||||
// TODO: assert that _Assoc != NULL or _Assoc == Self
|
||||
assert(_nParked == 0, "invariant");
|
||||
|
||||
int v;
|
||||
for (;;) {
|
||||
v = _Event;
|
||||
@ -5572,8 +5560,7 @@ void os::PlatformEvent::unpark() {
|
||||
// 1 :=> 1
|
||||
// -1 :=> either 0 or 1; must signal target thread
|
||||
// That is, we can safely transition _Event from -1 to either
|
||||
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
|
||||
// unpark() calls.
|
||||
// 0 or 1.
|
||||
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
||||
//
|
||||
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
||||
@ -5801,10 +5788,9 @@ void Parker::park(bool isAbsolute, jlong time) {
|
||||
}
|
||||
|
||||
void Parker::unpark() {
|
||||
int s, status;
|
||||
status = pthread_mutex_lock(_mutex);
|
||||
int status = pthread_mutex_lock(_mutex);
|
||||
assert(status == 0, "invariant");
|
||||
s = _counter;
|
||||
const int s = _counter;
|
||||
_counter = 1;
|
||||
if (s < 1) {
|
||||
// thread might be parked
|
||||
|
@ -315,7 +315,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
|
||||
int fired() { return _Event; }
|
||||
void park();
|
||||
void unpark();
|
||||
int TryPark();
|
||||
int park(jlong millis); // relative timed-wait only
|
||||
void SetAssociation(Thread * a) { _Assoc = a; }
|
||||
};
|
||||
|
@ -3174,20 +3174,14 @@ bool os::dont_yield() {
|
||||
}
|
||||
}
|
||||
|
||||
// Caveat: Solaris os::yield() causes a thread-state transition whereas
|
||||
// the linux and win32 implementations do not. This should be checked.
|
||||
|
||||
void os::yield() {
|
||||
// Yields to all threads with same or greater priority
|
||||
os::sleep(Thread::current(), 0, false);
|
||||
}
|
||||
|
||||
// Note that yield semantics are defined by the scheduling class to which
|
||||
// the thread currently belongs. Typically, yield will _not yield to
|
||||
// other equal or higher priority threads that reside on the dispatch queues
|
||||
// of other CPUs.
|
||||
|
||||
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
|
||||
void os::naked_yield() {
|
||||
thr_yield();
|
||||
}
|
||||
|
||||
// Interface for setting lwp priorities. If we are using T2 libthread,
|
||||
// which forces the use of BoundThreads or we manually set UseBoundThreads,
|
||||
@ -5441,20 +5435,11 @@ static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
|
||||
return abstime;
|
||||
}
|
||||
|
||||
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
|
||||
// Conceptually TryPark() should be equivalent to park(0).
|
||||
|
||||
int os::PlatformEvent::TryPark() {
|
||||
for (;;) {
|
||||
const int v = _Event;
|
||||
guarantee((v == 0) || (v == 1), "invariant");
|
||||
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
|
||||
}
|
||||
}
|
||||
|
||||
void os::PlatformEvent::park() { // AKA: down()
|
||||
// Invariant: Only the thread associated with the Event/PlatformEvent
|
||||
// may call park().
|
||||
assert(_nParked == 0, "invariant");
|
||||
|
||||
int v;
|
||||
for (;;) {
|
||||
v = _Event;
|
||||
@ -5541,8 +5526,7 @@ void os::PlatformEvent::unpark() {
|
||||
// 1 :=> 1
|
||||
// -1 :=> either 0 or 1; must signal target thread
|
||||
// That is, we can safely transition _Event from -1 to either
|
||||
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
|
||||
// unpark() calls.
|
||||
// 0 or 1.
|
||||
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
||||
//
|
||||
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
||||
@ -5746,10 +5730,9 @@ void Parker::park(bool isAbsolute, jlong time) {
|
||||
}
|
||||
|
||||
void Parker::unpark() {
|
||||
int s, status;
|
||||
status = os::Solaris::mutex_lock(_mutex);
|
||||
int status = os::Solaris::mutex_lock(_mutex);
|
||||
assert(status == 0, "invariant");
|
||||
s = _counter;
|
||||
const int s = _counter;
|
||||
_counter = 1;
|
||||
status = os::Solaris::mutex_unlock(_mutex);
|
||||
assert(status == 0, "invariant");
|
||||
|
@ -332,7 +332,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
|
||||
int fired() { return _Event; }
|
||||
void park();
|
||||
int park(jlong millis);
|
||||
int TryPark();
|
||||
void unpark();
|
||||
};
|
||||
|
||||
|
@ -3516,19 +3516,16 @@ void os::infinite_sleep() {
|
||||
|
||||
typedef BOOL (WINAPI * STTSignature)(void);
|
||||
|
||||
os::YieldResult os::NakedYield() {
|
||||
void os::naked_yield() {
|
||||
// Use either SwitchToThread() or Sleep(0)
|
||||
// Consider passing back the return value from SwitchToThread().
|
||||
if (os::Kernel32Dll::SwitchToThreadAvailable()) {
|
||||
return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY;
|
||||
SwitchToThread();
|
||||
} else {
|
||||
Sleep(0);
|
||||
}
|
||||
return os::YIELD_UNKNOWN;
|
||||
}
|
||||
|
||||
void os::yield() { os::NakedYield(); }
|
||||
|
||||
// Win32 only gives you access to seven real priorities at a time,
|
||||
// so we compress Java's ten down to seven. It would be better
|
||||
// if we dynamically adjusted relative priorities.
|
||||
@ -4877,8 +4874,7 @@ void os::PlatformEvent::unpark() {
|
||||
// 1 :=> 1
|
||||
// -1 :=> either 0 or 1; must signal target thread
|
||||
// That is, we can safely transition _Event from -1 to either
|
||||
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
|
||||
// unpark() calls.
|
||||
// 0 or 1.
|
||||
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
||||
//
|
||||
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
||||
|
@ -1998,7 +1998,13 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
|
||||
if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
|
||||
&& !target->can_be_statically_bound()) {
|
||||
// Find a vtable index if one is available
|
||||
vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
|
||||
// For arrays, callee_holder is Object. Resolving the call with
|
||||
// Object would allow an illegal call to finalize() on an
|
||||
// array. We use holder instead: illegal calls to finalize() won't
|
||||
// be compiled as vtable calls (IC call resolution will catch the
|
||||
// illegal call) and the few legal calls on array types won't be
|
||||
// either.
|
||||
vtable_index = target->resolve_vtable_index(calling_klass, holder);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1051,6 +1051,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
|
||||
n_copy->set_data((intx) (load_klass()));
|
||||
} else {
|
||||
assert(mirror() != NULL, "klass not set");
|
||||
// Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
|
||||
n_copy->set_data(cast_from_oop<intx>(mirror()));
|
||||
}
|
||||
|
||||
|
@ -38,7 +38,7 @@ ciConstantPoolCache::ciConstantPoolCache(Arena* arena,
|
||||
int expected_size) {
|
||||
_elements =
|
||||
new (arena) GrowableArray<void*>(arena, expected_size, 0, 0);
|
||||
_keys = new (arena) GrowableArray<intptr_t>(arena, expected_size, 0, 0);
|
||||
_keys = new (arena) GrowableArray<int>(arena, expected_size, 0, 0);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
@ -35,7 +35,7 @@
|
||||
// Usage note: this klass has nothing to do with ConstantPoolCache*.
|
||||
class ciConstantPoolCache : public ResourceObj {
|
||||
private:
|
||||
GrowableArray<intptr_t>* _keys;
|
||||
GrowableArray<int>* _keys;
|
||||
GrowableArray<void*>* _elements;
|
||||
|
||||
int find(int index);
|
||||
|
@ -185,6 +185,10 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
void ensure_metadata_alive(ciMetadata* m) {
|
||||
_factory->ensure_metadata_alive(m);
|
||||
}
|
||||
|
||||
ciInstance* get_instance(oop o) {
|
||||
if (o == NULL) return NULL;
|
||||
return get_object(o)->as_instance();
|
||||
|
@ -43,6 +43,7 @@ class ciKlass : public ciType {
|
||||
friend class ciMethod;
|
||||
friend class ciMethodData;
|
||||
friend class ciObjArrayKlass;
|
||||
friend class ciReceiverTypeData;
|
||||
|
||||
private:
|
||||
ciSymbol* _name;
|
||||
|
@ -170,6 +170,7 @@ void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
|
||||
Klass* k = data->as_ReceiverTypeData()->receiver(row);
|
||||
if (k != NULL) {
|
||||
ciKlass* klass = CURRENT_ENV->get_klass(k);
|
||||
CURRENT_ENV->ensure_metadata_alive(klass);
|
||||
set_receiver(row, klass);
|
||||
}
|
||||
}
|
||||
@ -191,6 +192,7 @@ void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
|
||||
void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
|
||||
Method* m = data->as_SpeculativeTrapData()->method();
|
||||
ciMethod* ci_m = CURRENT_ENV->get_method(m);
|
||||
CURRENT_ENV->ensure_metadata_alive(ci_m);
|
||||
set_method(ci_m);
|
||||
}
|
||||
|
||||
|
@ -70,6 +70,7 @@ protected:
|
||||
Klass* v = TypeEntries::valid_klass(k);
|
||||
if (v != NULL) {
|
||||
ciKlass* klass = CURRENT_ENV->get_klass(v);
|
||||
CURRENT_ENV->ensure_metadata_alive(klass);
|
||||
return with_status(klass, k);
|
||||
}
|
||||
return with_status(NULL, k);
|
||||
|
@ -46,6 +46,9 @@
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oop.inline2.hpp"
|
||||
#include "runtime/fieldType.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#endif
|
||||
|
||||
// ciObjectFactory
|
||||
//
|
||||
@ -374,6 +377,37 @@ ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// ciObjectFactory::ensure_metadata_alive
|
||||
//
|
||||
// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
|
||||
// This is primarily useful for metadata which is considered a weak root
// by the GC but needs to be a strong root if reachable from a current compilation.
|
||||
//
|
||||
void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
|
||||
ASSERT_IN_VM; // We're handling raw oops here.
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
if (!UseG1GC) {
|
||||
return;
|
||||
}
|
||||
Klass* metadata_owner_klass;
|
||||
if (m->is_klass()) {
|
||||
metadata_owner_klass = m->as_klass()->get_Klass();
|
||||
} else if (m->is_method()) {
|
||||
metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
|
||||
} else {
|
||||
fatal("Not implemented for other types of metadata");
|
||||
}
|
||||
|
||||
oop metadata_holder = metadata_owner_klass->klass_holder();
|
||||
if (metadata_holder != NULL) {
|
||||
G1SATBCardTableModRefBS::enqueue(metadata_holder);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------
|
||||
// ciObjectFactory::get_unloaded_method
|
||||
//
|
||||
|
@ -75,6 +75,8 @@ private:
|
||||
ciObject* create_new_object(oop o);
|
||||
ciMetadata* create_new_object(Metadata* o);
|
||||
|
||||
void ensure_metadata_alive(ciMetadata* m);
|
||||
|
||||
static bool is_equal(NonPermObject* p, oop key) {
|
||||
return p->object()->get_oop() == key;
|
||||
}
|
||||
|
@ -4590,8 +4590,9 @@ void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass
|
||||
Exceptions::fthrow(
|
||||
THREAD_AND_LOCATION,
|
||||
vmSymbols::java_lang_VerifyError(),
|
||||
"class %s overrides final method %s.%s",
|
||||
"class %s overrides final method %s.%s%s",
|
||||
this_klass->external_name(),
|
||||
super_m->method_holder()->external_name(),
|
||||
name->as_C_string(),
|
||||
signature->as_C_string()
|
||||
);
|
||||
|
@ -332,6 +332,27 @@ void ClassLoaderData::unload() {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
class AllAliveClosure : public OopClosure {
|
||||
BoolObjectClosure* _is_alive_closure;
|
||||
bool _found_dead;
|
||||
public:
|
||||
AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
|
||||
template <typename T> void do_oop_work(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (!_is_alive_closure->do_object_b(obj)) {
|
||||
_found_dead = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
void do_oop(oop* p) { do_oop_work<oop>(p); }
|
||||
void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
|
||||
bool found_dead() { return _found_dead; }
|
||||
};
|
||||
#endif
|
||||
|
||||
oop ClassLoaderData::keep_alive_object() const {
|
||||
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
|
||||
return is_anonymous() ? _klasses->java_mirror() : class_loader();
|
||||
@ -341,7 +362,15 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
|
||||
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|
||||
|| is_alive_closure->do_object_b(keep_alive_object());
|
||||
|
||||
assert(!alive || claimed(), "must be claimed");
|
||||
#ifdef ASSERT
|
||||
if (alive) {
|
||||
AllAliveClosure all_alive_closure(is_alive_closure);
|
||||
KlassToOopClosure klass_closure(&all_alive_closure);
|
||||
const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
|
||||
assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
|
||||
}
|
||||
#endif
|
||||
|
||||
return alive;
|
||||
}
|
||||
|
||||
@ -620,9 +649,9 @@ void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass
|
||||
|
||||
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
|
||||
if (ClassUnloading) {
|
||||
ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
|
||||
keep_alive_oops_do(f, klass_closure, must_claim);
|
||||
} else {
|
||||
ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
|
||||
oops_do(f, klass_closure, must_claim);
|
||||
}
|
||||
}
|
||||
|
||||
@ -632,6 +661,27 @@ void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
|
||||
}
|
||||
}
|
||||
|
||||
void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
|
||||
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
|
||||
CLDClosure* closure = cld->keep_alive() ? strong : weak;
|
||||
if (closure != NULL) {
|
||||
closure->do_cld(cld);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
|
||||
roots_cld_do(cl, NULL);
|
||||
}
|
||||
|
||||
void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
|
||||
if (ClassUnloading) {
|
||||
keep_alive_cld_do(cl);
|
||||
} else {
|
||||
cld_do(cl);
|
||||
}
|
||||
}
|
||||
|
||||
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
|
||||
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
|
||||
cld->classes_do(klass_closure);
|
||||
@ -689,6 +739,16 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
|
||||
return array;
|
||||
}
|
||||
|
||||
bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
|
||||
for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
|
||||
if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
|
||||
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
|
||||
@ -809,6 +869,60 @@ Metaspace* ClassLoaderData::rw_metaspace() {
|
||||
return _rw_metaspace;
|
||||
}
|
||||
|
||||
ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
|
||||
: _next_klass(NULL) {
|
||||
ClassLoaderData* cld = ClassLoaderDataGraph::_head;
|
||||
Klass* klass = NULL;
|
||||
|
||||
// Find the first klass in the CLDG.
|
||||
while (cld != NULL) {
|
||||
klass = cld->_klasses;
|
||||
if (klass != NULL) {
|
||||
_next_klass = klass;
|
||||
return;
|
||||
}
|
||||
cld = cld->next();
|
||||
}
|
||||
}
|
||||
|
||||
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
|
||||
Klass* next = klass->next_link();
|
||||
if (next != NULL) {
|
||||
return next;
|
||||
}
|
||||
|
||||
// No more klasses in the current CLD. Time to find a new CLD.
|
||||
ClassLoaderData* cld = klass->class_loader_data();
|
||||
while (next == NULL) {
|
||||
cld = cld->next();
|
||||
if (cld == NULL) {
|
||||
break;
|
||||
}
|
||||
next = cld->_klasses;
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
|
||||
Klass* head = (Klass*)_next_klass;
|
||||
|
||||
while (head != NULL) {
|
||||
Klass* next = next_klass_in_cldg(head);
|
||||
|
||||
Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
|
||||
|
||||
if (old_head == head) {
|
||||
return head; // Won the CAS.
|
||||
}
|
||||
|
||||
head = old_head;
|
||||
}
|
||||
|
||||
// Nothing more for the iterator to hand out.
|
||||
assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
|
||||
_data = ClassLoaderDataGraph::_head;
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include "memory/metaspaceCounters.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
#if INCLUDE_TRACE
|
||||
# include "utilities/ticks.hpp"
|
||||
#endif
|
||||
@ -59,6 +58,7 @@ class Metadebug;
|
||||
class ClassLoaderDataGraph : public AllStatic {
|
||||
friend class ClassLoaderData;
|
||||
friend class ClassLoaderDataGraphMetaspaceIterator;
|
||||
friend class ClassLoaderDataGraphKlassIteratorAtomic;
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// All CLDs (except the null CLD) can be reached by walking _head->_next->...
|
||||
@ -75,10 +75,16 @@ class ClassLoaderDataGraph : public AllStatic {
|
||||
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
|
||||
static void purge();
|
||||
static void clear_claimed_marks();
|
||||
// oops do
|
||||
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
|
||||
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
|
||||
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
|
||||
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
|
||||
// cld do
|
||||
static void cld_do(CLDClosure* cl);
|
||||
static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
|
||||
static void keep_alive_cld_do(CLDClosure* cl);
|
||||
static void always_strong_cld_do(CLDClosure* cl);
|
||||
// klass do
|
||||
static void classes_do(KlassClosure* klass_closure);
|
||||
static void classes_do(void f(Klass* const));
|
||||
static void methods_do(void f(Method*));
|
||||
@ -104,6 +110,7 @@ class ClassLoaderDataGraph : public AllStatic {
|
||||
static void dump() { dump_on(tty); }
|
||||
static void verify();
|
||||
|
||||
static bool unload_list_contains(const void* x);
|
||||
#ifndef PRODUCT
|
||||
static bool contains_loader_data(ClassLoaderData* loader_data);
|
||||
#endif
|
||||
@ -136,6 +143,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
|
||||
};
|
||||
|
||||
friend class ClassLoaderDataGraph;
|
||||
friend class ClassLoaderDataGraphKlassIteratorAtomic;
|
||||
friend class ClassLoaderDataGraphMetaspaceIterator;
|
||||
friend class MetaDataFactory;
|
||||
friend class Method;
|
||||
@ -195,7 +203,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
|
||||
|
||||
void unload();
|
||||
bool keep_alive() const { return _keep_alive; }
|
||||
bool is_alive(BoolObjectClosure* is_alive_closure) const;
|
||||
void classes_do(void f(Klass*));
|
||||
void loaded_classes_do(KlassClosure* klass_closure);
|
||||
void classes_do(void f(InstanceKlass*));
|
||||
@ -208,6 +215,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
|
||||
MetaWord* allocate(size_t size);
|
||||
|
||||
public:
|
||||
|
||||
bool is_alive(BoolObjectClosure* is_alive_closure) const;
|
||||
|
||||
// Accessors
|
||||
Metaspace* metaspace_or_null() const { return _metaspace; }
|
||||
|
||||
@ -293,6 +303,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
|
||||
void initialize_shared_metaspaces();
|
||||
};
|
||||
|
||||
// An iterator that distributes Klasses to parallel worker threads.
|
||||
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
|
||||
volatile Klass* _next_klass;
|
||||
public:
|
||||
ClassLoaderDataGraphKlassIteratorAtomic();
|
||||
Klass* next_klass();
|
||||
private:
|
||||
static Klass* next_klass_in_cldg(Klass* klass);
|
||||
};
|
||||
|
||||
class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
|
||||
ClassLoaderData* _data;
|
||||
public:
|
||||
|
@ -199,6 +199,26 @@ bool Dictionary::do_unloading() {
|
||||
return class_was_unloaded;
|
||||
}
|
||||
|
||||
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
|
||||
// Skip the strong roots probe marking if the closures are the same.
|
||||
if (strong == weak) {
|
||||
oops_do(strong);
|
||||
return;
|
||||
}
|
||||
|
||||
for (int index = 0; index < table_size(); index++) {
|
||||
for (DictionaryEntry *probe = bucket(index);
|
||||
probe != NULL;
|
||||
probe = probe->next()) {
|
||||
Klass* e = probe->klass();
|
||||
ClassLoaderData* loader_data = probe->loader_data();
|
||||
if (is_strongly_reachable(loader_data, e)) {
|
||||
probe->set_strongly_reachable();
|
||||
}
|
||||
}
|
||||
}
|
||||
_pd_cache_table->roots_oops_do(strong, weak);
|
||||
}
|
||||
|
||||
void Dictionary::always_strong_oops_do(OopClosure* blk) {
|
||||
// Follow all system classes and temporary placeholders in dictionary; only
|
||||
@ -490,6 +510,23 @@ void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
|
||||
}
|
||||
}
|
||||
|
||||
void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
|
||||
for (int index = 0; index < table_size(); index++) {
|
||||
for (ProtectionDomainCacheEntry* probe = bucket(index);
|
||||
probe != NULL;
|
||||
probe = probe->next()) {
|
||||
if (probe->is_strongly_reachable()) {
|
||||
probe->reset_strongly_reachable();
|
||||
probe->oops_do(strong);
|
||||
} else {
|
||||
if (weak != NULL) {
|
||||
probe->oops_do(weak);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
uint ProtectionDomainCacheTable::bucket_size() {
|
||||
return sizeof(ProtectionDomainCacheEntry);
|
||||
}
|
||||
|
@ -89,6 +89,7 @@ public:
|
||||
// GC support
|
||||
void oops_do(OopClosure* f);
|
||||
void always_strong_oops_do(OopClosure* blk);
|
||||
void roots_oops_do(OopClosure* strong, OopClosure* weak);
|
||||
|
||||
void always_strong_classes_do(KlassClosure* closure);
|
||||
|
||||
@ -218,6 +219,7 @@ public:
|
||||
// GC support
|
||||
void oops_do(OopClosure* f);
|
||||
void always_strong_oops_do(OopClosure* f);
|
||||
void roots_oops_do(OopClosure* strong, OopClosure* weak);
|
||||
|
||||
static uint bucket_size();
|
||||
|
||||
|
@ -1239,6 +1239,16 @@ oop java_lang_Throwable::message(Handle throwable) {
|
||||
}
|
||||
|
||||
|
||||
// Return Symbol for detailed_message or NULL
|
||||
Symbol* java_lang_Throwable::detail_message(oop throwable) {
|
||||
PRESERVE_EXCEPTION_MARK; // Keep original exception
|
||||
oop detailed_message = java_lang_Throwable::message(throwable);
|
||||
if (detailed_message != NULL) {
|
||||
return java_lang_String::as_symbol(detailed_message, THREAD);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void java_lang_Throwable::set_message(oop throwable, oop value) {
|
||||
throwable->obj_field_put(detailMessage_offset, value);
|
||||
}
|
||||
|
@ -520,6 +520,7 @@ class java_lang_Throwable: AllStatic {
|
||||
static oop message(oop throwable);
|
||||
static oop message(Handle throwable);
|
||||
static void set_message(oop throwable, oop value);
|
||||
static Symbol* detail_message(oop throwable);
|
||||
static void print_stack_element(outputStream *st, Handle mirror, int method,
|
||||
int version, int bci);
|
||||
static void print_stack_element(outputStream *st, methodHandle method, int bci);
|
||||
|
@ -47,8 +47,11 @@ MetadataOnStackMark::MetadataOnStackMark() {
|
||||
if (_marked_objects == NULL) {
|
||||
_marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
|
||||
}
|
||||
|
||||
Threads::metadata_do(Metadata::mark_on_stack);
|
||||
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
|
||||
if (JvmtiExport::has_redefined_a_class()) {
|
||||
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
|
||||
}
|
||||
CompileBroker::mark_on_stack();
|
||||
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
|
||||
ThreadService::metadata_do(Metadata::mark_on_stack);
|
||||
|
@ -37,6 +37,7 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "utilities/hashtable.inline.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/g1StringDedup.hpp"
|
||||
#endif
|
||||
|
||||
@ -157,11 +158,26 @@ oop StringTable::lookup(Symbol* symbol) {
  return lookup(chars, length);
}

// Tell the GC that this string was looked up in the StringTable.
static void ensure_string_alive(oop string) {
  // A lookup in the StringTable could return an object that was previously
  // considered dead. The SATB part of G1 needs to get notified about this
  // potential resurrection, otherwise the marking might not find the object.
#if INCLUDE_ALL_GCS
  if (UseG1GC && string != NULL) {
    G1SATBCardTableModRefBS::enqueue(string);
  }
#endif
}

oop StringTable::lookup(jchar* name, int len) {
  unsigned int hash = hash_string(name, len);
  int index = the_table()->hash_to_index(hash);
  return the_table()->lookup(index, name, len, hash);
  oop string = the_table()->lookup(index, name, len, hash);

  ensure_string_alive(string);

  return string;
}
@ -172,7 +188,10 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
|
||||
oop found_string = the_table()->lookup(index, name, len, hashValue);
|
||||
|
||||
// Found
|
||||
if (found_string != NULL) return found_string;
|
||||
if (found_string != NULL) {
|
||||
ensure_string_alive(found_string);
|
||||
return found_string;
|
||||
}
|
||||
|
||||
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
|
||||
assert(!Universe::heap()->is_in_reserved(name),
|
||||
@ -197,11 +216,17 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
|
||||
|
||||
// Grab the StringTable_lock before getting the_table() because it could
|
||||
// change at safepoint.
|
||||
MutexLocker ml(StringTable_lock, THREAD);
|
||||
oop added_or_found;
|
||||
{
|
||||
MutexLocker ml(StringTable_lock, THREAD);
|
||||
// Otherwise, add the symbol to the table
|
||||
added_or_found = the_table()->basic_add(index, string, name, len,
|
||||
hashValue, CHECK_NULL);
|
||||
}
|
||||
|
||||
// Otherwise, add the symbol to the table
|
||||
return the_table()->basic_add(index, string, name, len,
|
||||
hashValue, CHECK_NULL);
|
||||
ensure_string_alive(added_or_found);
|
||||
|
||||
return added_or_found;
|
||||
}
|
||||
|
||||
oop StringTable::intern(Symbol* symbol, TRAPS) {
|
||||
|
@ -1612,13 +1612,7 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
|
||||
// system dictionary and follows the remaining classes' contents.
|
||||
|
||||
void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
|
||||
blk->do_oop(&_java_system_loader);
|
||||
blk->do_oop(&_system_loader_lock_obj);
|
||||
|
||||
dictionary()->always_strong_oops_do(blk);
|
||||
|
||||
// Visit extra methods
|
||||
invoke_method_table()->oops_do(blk);
|
||||
roots_oops_do(blk, NULL);
|
||||
}
|
||||
|
||||
void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
|
||||
@ -1685,6 +1679,17 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
|
||||
return unloading_occurred;
|
||||
}
|
||||
|
||||
void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
|
||||
strong->do_oop(&_java_system_loader);
|
||||
strong->do_oop(&_system_loader_lock_obj);
|
||||
|
||||
// Adjust dictionary
|
||||
dictionary()->roots_oops_do(strong, weak);
|
||||
|
||||
// Visit extra methods
|
||||
invoke_method_table()->oops_do(strong);
|
||||
}
|
||||
|
||||
void SystemDictionary::oops_do(OopClosure* f) {
|
||||
f->do_oop(&_java_system_loader);
|
||||
f->do_oop(&_system_loader_lock_obj);
|
||||
|
@ -330,6 +330,7 @@ public:
|
||||
|
||||
// Applies "f->do_oop" to all root oops in the system dictionary.
|
||||
static void oops_do(OopClosure* f);
|
||||
static void roots_oops_do(OopClosure* strong, OopClosure* weak);
|
||||
|
||||
// System loader lock
|
||||
static oop system_loader_lock() { return _system_loader_lock_obj; }
|
||||
|
@ -331,6 +331,11 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
|
||||
// Walk the list of methods which might contain non-perm oops.
|
||||
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
|
||||
if (UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
debug_only(mark_scavenge_root_nmethods());
|
||||
|
||||
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
|
||||
@ -356,6 +361,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
|
||||
|
||||
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
|
||||
if (UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
nm->set_on_scavenge_root_list();
|
||||
nm->set_scavenge_root_link(_scavenge_root_nmethods);
|
||||
set_scavenge_root_nmethods(nm);
|
||||
@ -364,6 +374,11 @@ void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
|
||||
|
||||
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
|
||||
if (UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
print_trace("drop_scavenge_root", nm);
|
||||
nmethod* last = NULL;
|
||||
nmethod* cur = scavenge_root_nmethods();
|
||||
@ -385,6 +400,11 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
|
||||
|
||||
void CodeCache::prune_scavenge_root_nmethods() {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
|
||||
if (UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
debug_only(mark_scavenge_root_nmethods());
|
||||
|
||||
nmethod* last = NULL;
|
||||
@ -417,6 +437,10 @@ void CodeCache::prune_scavenge_root_nmethods() {
|
||||
|
||||
#ifndef PRODUCT
|
||||
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
|
||||
if (UseG1GC) {
|
||||
return;
|
||||
}
|
||||
|
||||
// While we are here, verify the integrity of the list.
|
||||
mark_scavenge_root_nmethods();
|
||||
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
|
||||
@ -457,9 +481,36 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
|
||||
}
|
||||
#endif //PRODUCT
|
||||
|
||||
void CodeCache::verify_clean_inline_caches() {
|
||||
#ifdef ASSERT
|
||||
FOR_ALL_ALIVE_BLOBS(cb) {
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
assert(!nm->is_unloaded(), "Tautology");
|
||||
nm->verify_clean_inline_caches();
|
||||
nm->verify();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void CodeCache::verify_icholder_relocations() {
|
||||
#ifdef ASSERT
|
||||
// make sure that we aren't leaking icholders
|
||||
int count = 0;
|
||||
FOR_ALL_BLOBS(cb) {
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
count += nm->verify_icholder_relocations();
|
||||
}
|
||||
}
|
||||
|
||||
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
|
||||
CompiledICHolder::live_count(), "must agree");
|
||||
#endif
|
||||
}
|
||||
|
||||
void CodeCache::gc_prologue() {
|
||||
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
|
||||
}
|
||||
|
||||
void CodeCache::gc_epilogue() {
|
||||
@ -472,41 +523,15 @@ void CodeCache::gc_epilogue() {
|
||||
nm->cleanup_inline_caches();
|
||||
}
|
||||
DEBUG_ONLY(nm->verify());
|
||||
nm->fix_oop_relocations();
|
||||
DEBUG_ONLY(nm->verify_oop_relocations());
|
||||
}
|
||||
}
|
||||
set_needs_cache_clean(false);
|
||||
prune_scavenge_root_nmethods();
|
||||
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
|
||||
|
||||
#ifdef ASSERT
|
||||
// make sure that we aren't leaking icholders
|
||||
int count = 0;
|
||||
FOR_ALL_BLOBS(cb) {
|
||||
if (cb->is_nmethod()) {
|
||||
RelocIterator iter((nmethod*)cb);
|
||||
while(iter.next()) {
|
||||
if (iter.type() == relocInfo::virtual_call_type) {
|
||||
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
|
||||
CompiledIC *ic = CompiledIC_at(iter.reloc());
|
||||
if (TraceCompiledIC) {
|
||||
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
|
||||
ic->print();
|
||||
}
|
||||
assert(ic->cached_icholder() != NULL, "must be non-NULL");
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
|
||||
CompiledICHolder::live_count(), "must agree");
|
||||
#endif
|
||||
verify_icholder_relocations();
|
||||
}
|
||||
|
||||
|
||||
void CodeCache::verify_oops() {
|
||||
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
VerifyOopClosure voc;
|
||||
|
@ -134,10 +134,6 @@ class CodeCache : AllStatic {
|
||||
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
|
||||
// to "true" iff some code got unloaded.
|
||||
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
|
||||
static void oops_do(OopClosure* f) {
|
||||
CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
|
||||
blobs_do(&oopc);
|
||||
}
|
||||
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
|
||||
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
|
||||
|
||||
@ -173,6 +169,9 @@ class CodeCache : AllStatic {
|
||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
|
||||
static void verify_clean_inline_caches();
|
||||
static void verify_icholder_relocations();
|
||||
|
||||
// Deoptimization
|
||||
static int mark_for_deoptimization(DepChange& changes);
|
||||
#ifdef HOTSWAP
|
||||
|
@ -99,13 +99,13 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
|
||||
}
|
||||
|
||||
{
|
||||
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
#ifdef ASSERT
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
|
||||
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
|
||||
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
|
||||
#endif
|
||||
_ic_call->set_destination_mt_safe(entry_point);
|
||||
}
|
||||
_ic_call->set_destination_mt_safe(entry_point);
|
||||
}
|
||||
|
||||
if (is_optimized() || is_icstub) {
|
||||
// Optimized call sites don't have a cache value and ICStub call
|
||||
@ -159,10 +159,24 @@ address CompiledIC::stub_address() const {
|
||||
//-----------------------------------------------------------------------------
|
||||
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||
|
||||
void CompiledIC::initialize_from_iter(RelocIterator* iter) {
|
||||
assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");
|
||||
|
||||
if (iter->type() == relocInfo::virtual_call_type) {
|
||||
virtual_call_Relocation* r = iter->virtual_call_reloc();
|
||||
_is_optimized = false;
|
||||
_value = nativeMovConstReg_at(r->cached_value());
|
||||
} else {
|
||||
assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||
_is_optimized = true;
|
||||
_value = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||
: _ic_call(call)
|
||||
{
|
||||
address ic_call = call->instruction_address();
|
||||
address ic_call = _ic_call->instruction_address();
|
||||
|
||||
assert(ic_call != NULL, "ic_call address must be set");
|
||||
assert(nm != NULL, "must pass nmethod");
|
||||
@ -173,15 +187,21 @@ CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
|
||||
bool ret = iter.next();
|
||||
assert(ret == true, "relocInfo must exist at this address");
|
||||
assert(iter.addr() == ic_call, "must find ic_call");
|
||||
if (iter.type() == relocInfo::virtual_call_type) {
|
||||
virtual_call_Relocation* r = iter.virtual_call_reloc();
|
||||
_is_optimized = false;
|
||||
_value = nativeMovConstReg_at(r->cached_value());
|
||||
} else {
|
||||
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
|
||||
_is_optimized = true;
|
||||
_value = NULL;
|
||||
}
|
||||
|
||||
initialize_from_iter(&iter);
|
||||
}
|
||||
|
||||
CompiledIC::CompiledIC(RelocIterator* iter)
|
||||
: _ic_call(nativeCall_at(iter->addr()))
|
||||
{
|
||||
address ic_call = _ic_call->instruction_address();
|
||||
|
||||
nmethod* nm = iter->code();
|
||||
assert(ic_call != NULL, "ic_call address must be set");
|
||||
assert(nm != NULL, "must pass nmethod");
|
||||
assert(nm->contains(ic_call), "must be in nmethod");
|
||||
|
||||
initialize_from_iter(iter);
|
||||
}
|
||||
|
||||
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
|
||||
@ -509,7 +529,7 @@ bool CompiledIC::is_icholder_entry(address entry) {
|
||||
void CompiledStaticCall::set_to_clean() {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
|
||||
// Reset call site
|
||||
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
#ifdef ASSERT
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(this);
|
||||
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
|
||||
|
@ -136,6 +136,9 @@ class CompiledIC: public ResourceObj {
|
||||
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
|
||||
|
||||
CompiledIC(nmethod* nm, NativeCall* ic_call);
|
||||
CompiledIC(RelocIterator* iter);
|
||||
|
||||
void initialize_from_iter(RelocIterator* iter);
|
||||
|
||||
static bool is_icholder_entry(address entry);
|
||||
|
||||
@ -169,6 +172,7 @@ class CompiledIC: public ResourceObj {
|
||||
friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
|
||||
friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
|
||||
friend CompiledIC* CompiledIC_at(Relocation* call_site);
|
||||
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
|
||||
|
||||
// This is used to release CompiledICHolder*s from nmethods that
|
||||
// are about to be freed. The callsite might contain other stale
|
||||
@ -249,6 +253,13 @@ inline CompiledIC* CompiledIC_at(Relocation* call_site) {
|
||||
return c_ic;
|
||||
}
|
||||
|
||||
inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
|
||||
assert(reloc_iter->type() == relocInfo::virtual_call_type ||
|
||||
reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
|
||||
CompiledIC* c_ic = new CompiledIC(reloc_iter);
|
||||
c_ic->verify();
|
||||
return c_ic;
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// The CompiledStaticCall represents a call to a static method in the compiled
|
||||
|
@ -51,6 +51,8 @@
|
||||
|
||||
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
||||
|
||||
unsigned char nmethod::_global_unloading_clock = 0;
|
||||
|
||||
#ifdef DTRACE_ENABLED
|
||||
|
||||
// Only bother with this argument setup if dtrace is available
|
||||
@ -446,6 +448,7 @@ const char* nmethod::compile_kind() const {
|
||||
// Fill in default values for various flag fields
|
||||
void nmethod::init_defaults() {
|
||||
_state = in_use;
|
||||
_unloading_clock = 0;
|
||||
_marked_for_reclamation = 0;
|
||||
_has_flushed_dependencies = 0;
|
||||
_has_unsafe_access = 0;
|
||||
@ -464,7 +467,11 @@ void nmethod::init_defaults() {
|
||||
_oops_do_mark_link = NULL;
|
||||
_jmethod_id = NULL;
|
||||
_osr_link = NULL;
|
||||
_scavenge_root_link = NULL;
|
||||
if (UseG1GC) {
|
||||
_unloading_next = NULL;
|
||||
} else {
|
||||
_scavenge_root_link = NULL;
|
||||
}
|
||||
_scavenge_root_state = 0;
|
||||
_compiler = NULL;
|
||||
#if INCLUDE_RTM_OPT
|
||||
@ -1146,7 +1153,7 @@ void nmethod::cleanup_inline_caches() {
|
||||
switch(iter.type()) {
|
||||
case relocInfo::virtual_call_type:
|
||||
case relocInfo::opt_virtual_call_type: {
|
||||
CompiledIC *ic = CompiledIC_at(iter.reloc());
|
||||
CompiledIC *ic = CompiledIC_at(&iter);
|
||||
// Ok, to lookup references to zombies here
|
||||
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
|
||||
if( cb != NULL && cb->is_nmethod() ) {
|
||||
@ -1170,6 +1177,77 @@ void nmethod::cleanup_inline_caches() {
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::verify_clean_inline_caches() {
|
||||
assert_locked_or_safepoint(CompiledIC_lock);
|
||||
|
||||
// If the method is not entrant or zombie then a JMP is plastered over the
|
||||
// first few bytes. If an oop in the old code was there, that oop
|
||||
// should not get GC'd. Skip the first few bytes of oops on
|
||||
// not-entrant methods.
|
||||
address low_boundary = verified_entry_point();
|
||||
if (!is_in_use()) {
|
||||
low_boundary += NativeJump::instruction_size;
|
||||
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
|
||||
// This means that the low_boundary is going to be a little too high.
|
||||
// This shouldn't matter, since oops of non-entrant methods are never used.
|
||||
// In fact, why are we bothering to look at oops in a non-entrant method??
|
||||
}
|
||||
|
||||
ResourceMark rm;
|
||||
RelocIterator iter(this, low_boundary);
|
||||
while(iter.next()) {
|
||||
switch(iter.type()) {
|
||||
case relocInfo::virtual_call_type:
|
||||
case relocInfo::opt_virtual_call_type: {
|
||||
CompiledIC *ic = CompiledIC_at(&iter);
|
||||
// Ok, to lookup references to zombies here
|
||||
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
|
||||
if( cb != NULL && cb->is_nmethod() ) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
|
||||
assert(ic->is_clean(), "IC should be clean");
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case relocInfo::static_call_type: {
|
||||
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
|
||||
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
|
||||
if( cb != NULL && cb->is_nmethod() ) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
|
||||
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
|
||||
assert(csc->is_clean(), "IC should be clean");
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int nmethod::verify_icholder_relocations() {
|
||||
int count = 0;
|
||||
|
||||
RelocIterator iter(this);
|
||||
while(iter.next()) {
|
||||
if (iter.type() == relocInfo::virtual_call_type) {
|
||||
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
|
||||
CompiledIC *ic = CompiledIC_at(&iter);
|
||||
if (TraceCompiledIC) {
|
||||
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
|
||||
ic->print();
|
||||
}
|
||||
assert(ic->cached_icholder() != NULL, "must be non-NULL");
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
// This is a private interface with the sweeper.
|
||||
void nmethod::mark_as_seen_on_stack() {
|
||||
assert(is_alive(), "Must be an alive method");
|
||||
@ -1202,6 +1280,23 @@ void nmethod::inc_decompile_count() {
|
||||
mdo->inc_decompile_count();
|
||||
}
|
||||
|
||||
void nmethod::increase_unloading_clock() {
|
||||
_global_unloading_clock++;
|
||||
if (_global_unloading_clock == 0) {
|
||||
// _nmethods are allocated with _unloading_clock == 0,
|
||||
// so 0 is never used as a clock value.
|
||||
_global_unloading_clock = 1;
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::set_unloading_clock(unsigned char unloading_clock) {
|
||||
OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
|
||||
}
|
||||
|
||||
unsigned char nmethod::unloading_clock() {
|
||||
return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
|
||||
}
|
||||
|
||||
void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
|
||||
|
||||
post_compiled_method_unload();
|
||||
@ -1247,6 +1342,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
|
||||
// for later on.
|
||||
CodeCache::set_needs_cache_clean(true);
|
||||
}
|
||||
|
||||
// Unregister must be done before the state change
|
||||
Universe::heap()->unregister_nmethod(this);
|
||||
|
||||
_state = unloaded;
|
||||
|
||||
// Log the unloading.
|
||||
@ -1590,6 +1689,35 @@ void nmethod::post_compiled_method_unload() {
  set_unload_reported();
}

void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder oops which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_oop = ic->cached_icholder();
    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
@ -1632,32 +1760,8 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC *ic = CompiledIC_at(iter.reloc());
      if (ic->is_icholder_call()) {
        // The only exception is compiledICHolder oops which may
        // yet be marked below. (We check this further below).
        CompiledICHolder* cichk_oop = ic->cached_icholder();
        if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
            cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
          continue;
        }
      } else {
        Metadata* ic_oop = ic->cached_metadata();
        if (ic_oop != NULL) {
          if (ic_oop->is_klass()) {
            if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
              continue;
            }
          } else if (ic_oop->is_method()) {
            if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
              continue;
            }
          } else {
            ShouldNotReachHere();
          }
        }
      }
      ic->set_to_clean();
      CompiledIC *ic = CompiledIC_at(&iter);
      clean_ic_if_metadata_is_dead(ic, is_alive);
    }
  }
}
@ -1695,6 +1799,175 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
  verify_metadata_loaders(low_boundary, is_alive);
}

template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  // Ok, to lookup references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  if (cb != NULL && cb->is_nmethod()) {
    nmethod* nm = (nmethod*)cb;

    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

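The two thin overloads above let virtual and opt-virtual call sites (CompiledIC) and static call sites (CompiledStaticCall) share one cleaning routine: each overload only picks the right accessor for the call destination and forwards to the function template, which relies on nothing but set_to_clean()/is_clean() being available on its parameter. A self-contained sketch of the same dispatch pattern with toy stand-in types (ToyIC, ToyStaticCall and the liveness test are made up for illustration, not HotSpot classes):

// Hypothetical model of the template-plus-overload dispatch used above.
#include <cstdio>

typedef unsigned char* address;

struct ToyIC {
  address dest;
  bool    clean;
  address ic_destination() const { return dest; }
  void    set_to_clean()         { clean = true; }
};

struct ToyStaticCall {
  address dest;
  bool    clean;
  address destination() const { return dest; }
  void    set_to_clean()      { clean = true; }
};

// Shared worker: only needs set_to_clean() from its parameter type.
template <class CallSite>
static bool clean_if_target_is_dead(CallSite* cs, address addr) {
  bool target_is_dead = (addr == 0);   // toy stand-in for the real liveness test
  if (target_is_dead) {
    cs->set_to_clean();
  }
  return target_is_dead;
}

// Thin overloads pick the right accessor for the call destination.
static bool clean_if_target_is_dead(ToyIC* ic)          { return clean_if_target_is_dead(ic, ic->ic_destination()); }
static bool clean_if_target_is_dead(ToyStaticCall* csc) { return clean_if_target_is_dead(csc, csc->destination()); }

int main() {
  unsigned char live_target = 0;
  ToyIC         ic  = { 0, false };             // destination already gone -> gets cleaned
  ToyStaticCall csc = { &live_target, false };  // destination still alive  -> left alone
  printf("ic cleaned: %d, csc cleaned: %d\n",
         (int)clean_if_target_is_dead(&ic), (int)clean_if_target_is_dead(&csc));
  return 0;
}
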
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  if (a_class_was_redefined) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool is_unloaded = false;
  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      if (!is_unloaded) {
        // Unload check
        oop_Relocation* r = iter.oop_reloc();
        // Traverse those oops directly embedded in the code.
        // Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != NULL) {
          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
            is_unloaded = true;
          }
        }
      }
      break;

    }
  }

  if (is_unloaded) {
    return postponed;
  }

  // Scopes
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word()) continue; // skip non-oops
    if (can_unload(is_alive, p, unloading_occurred)) {
      is_unloaded = true;
      break;
    }
  }

  if (is_unloaded) {
    return postponed;
  }

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;
    }
  }
}

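Taken together, do_unloading_parallel() and do_unloading_parallel_postponed() form a two-pass protocol: in the first pass a worker may hit a call site whose target nmethod has not yet been stamped with the current unloading clock, in which case cleaning that site is reported back as postponed; once every nmethod has been through the first pass, only the postponed ones are revisited. The toy, single-threaded model below only shows the shape of that protocol; how HotSpot actually claims nmethods across workers and where it bumps the global clock are simplified away (ToyNmethod and the driver in main() are illustrative only):

// Hypothetical, single-threaded model of the postponed-cleaning protocol above.
#include <cstdio>
#include <vector>

struct ToyNmethod {
  unsigned char clock;        // per-nmethod unloading clock
  bool needs_postpone;        // set when pass 1 could not finish this nmethod's call sites
};

static unsigned char global_clock = 1;

// Pass 1: returns true if a call site had to be postponed because its
// callee had not been stamped with the current clock yet.
bool do_unloading_pass1(ToyNmethod& nm, const ToyNmethod& callee) {
  bool postponed = (callee.clock != global_clock);
  nm.needs_postpone = postponed;
  return postponed;
}

// Pass 2: only nmethods flagged in pass 1 are revisited.
void do_unloading_postponed(ToyNmethod& nm) {
  nm.needs_postpone = false;  // by now every callee has been processed, so cleaning is safe
}

int main() {
  ToyNmethod caller = { 1, false };
  ToyNmethod callee = { 0, false };            // not yet processed in this cycle
  std::vector<ToyNmethod*> postponed;

  if (do_unloading_pass1(caller, callee)) {    // pass 1 over all nmethods
    postponed.push_back(&caller);
  }
  for (ToyNmethod* nm : postponed) {           // pass 2 over the postponed ones only
    do_unloading_postponed(*nm);
  }
  global_clock++;                              // end of cycle: bump the clock
  printf("postponed in pass 1: %zu\n", postponed.size());
  return 0;
}
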
#ifdef ASSERT

class CheckClass : AllStatic {
@ -1741,7 +2014,7 @@ void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* i
      // compiled code is maintaining a link to dead metadata.
      address static_call_addr = NULL;
      if (iter.type() == relocInfo::opt_virtual_call_type) {
        CompiledIC* cic = CompiledIC_at(iter.reloc());
        CompiledIC* cic = CompiledIC_at(&iter);
        if (!cic->is_call_to_interpreted()) {
          static_call_addr = iter.addr();
        }
@ -1793,7 +2066,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
      }
    } else if (iter.type() == relocInfo::virtual_call_type) {
      // Check compiledIC holders associated with this nmethod
      CompiledIC *ic = CompiledIC_at(iter.reloc());
      CompiledIC *ic = CompiledIC_at(&iter);
      if (ic->is_icholder_call()) {
        CompiledICHolder* cichk = ic->cached_icholder();
        f(cichk->holder_method());
@ -1911,7 +2184,7 @@ void nmethod::oops_do_marking_epilogue() {
    assert(cur != NULL, "not NULL-terminated");
    nmethod* next = cur->_oops_do_mark_link;
    cur->_oops_do_mark_link = NULL;
    cur->fix_oop_relocations();
    cur->verify_oop_relocations();
    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
    cur = next;
  }
@ -2479,6 +2752,10 @@ public:
};

void nmethod::verify_scavenge_root_oops() {
  if (UseG1GC) {
    return;
  }

  if (!on_scavenge_root_list()) {
    // Actually look inside, to verify the claim that it's clean.
    DebugScavengeRoot debug_scavenge_root(this);
@ -2922,7 +3199,7 @@ void nmethod::print_calls(outputStream* st) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        VerifyMutexLocker mc(CompiledIC_lock);
        CompiledIC_at(iter.reloc())->print();
        CompiledIC_at(&iter)->print();
        break;
      }
      case relocInfo::static_call_type:
@ -111,6 +111,11 @@ class nmethod : public CodeBlob {
  friend class NMethodSweeper;
  friend class CodeCache; // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethod's
  Method* _method;
  int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
@ -118,7 +123,13 @@ class nmethod : public CodeBlob {

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
  nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

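The anonymous union above reuses one pointer-sized slot for two intrusive list links that are never needed at the same time in a single VM: G1's unloading list and the scavenge-root list used by the other collectors. A small standalone illustration of that layout choice (Node is a made-up type, not the nmethod class):

// Hypothetical illustration: two mutually exclusive list links share one slot.
#include <cstdio>

struct Node {
  int payload;
  union {
    Node* unloading_next;      // used when chaining for unloading processing
    Node* scavenge_root_link;  // used when chaining on the scavenge-root list
  };
};

int main() {
  Node a, b;
  a.payload = 1;
  b.payload = 2;
  a.unloading_next = &b;       // only one interpretation of the slot is active at a time
  printf("sizeof(Node) = %zu bytes\n", sizeof(Node));  // one pointer slot, not two
  return 0;
}
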
  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod* volatile _oops_do_mark_link;
@ -180,6 +191,8 @@ class nmethod : public CodeBlob {
  // Protected by Patching_lock
  volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
@ -437,6 +450,15 @@ class nmethod : public CodeBlob {
  bool unload_reported() { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  void set_unloading_next(nmethod* next) { _unloading_next = next; }
  nmethod* unloading_next() { return _unloading_next; }

  static unsigned char global_unloading_clock() { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization() { _marked_for_deoptimization = true; }

@ -552,6 +574,10 @@ public:
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

@ -577,6 +603,10 @@ public:

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
  // Unload a nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,

@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
@ -62,6 +63,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
  // If changing the name, update the other file accordingly.
  BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
  if (blob == NULL) {
    CompileBroker::handle_full_code_cache();
    return NULL;
  }
  _chunk = blob->content_begin();

@ -1048,7 +1048,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
  }

  // Let go of Threads_lock before yielding
  os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return compiler_thread;
}
File diff suppressed because it is too large
@ -1,477 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
|
||||
|
||||
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
|
||||
#include "runtime/timer.hpp"
|
||||
|
||||
// This class keeps statistical information and computes the
|
||||
// size of the heap for the concurrent mark sweep collector.
|
||||
//
|
||||
// Cost for garbage collector include cost for
|
||||
// minor collection
|
||||
// concurrent collection
|
||||
// stop-the-world component
|
||||
// concurrent component
|
||||
// major compacting collection
|
||||
// uses decaying cost
|
||||
|
||||
// Forward decls
|
||||
class elapsedTimer;
|
||||
|
||||
class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
|
||||
friend class CMSGCAdaptivePolicyCounters;
|
||||
friend class CMSCollector;
|
||||
private:
|
||||
|
||||
// Total number of processors available
|
||||
int _processor_count;
|
||||
// Number of processors used by the concurrent phases of GC
|
||||
// This number is assumed to be the same for all concurrent
|
||||
// phases.
|
||||
int _concurrent_processor_count;
|
||||
|
||||
// Time that the mutators run exclusive of a particular
|
||||
// phase. For example, the time the mutators run excluding
|
||||
// the time during which the cms collector runs concurrently
|
||||
// with the mutators.
|
||||
// Between end of most recent cms reset and start of initial mark
|
||||
// This may be redundant
|
||||
double _latest_cms_reset_end_to_initial_mark_start_secs;
|
||||
// Between end of the most recent initial mark and start of remark
|
||||
double _latest_cms_initial_mark_end_to_remark_start_secs;
|
||||
// Between end of most recent collection and start of
|
||||
// a concurrent collection
|
||||
double _latest_cms_collection_end_to_collection_start_secs;
|
||||
// Times of the concurrent phases of the most recent
|
||||
// concurrent collection
|
||||
double _latest_cms_concurrent_marking_time_secs;
|
||||
double _latest_cms_concurrent_precleaning_time_secs;
|
||||
double _latest_cms_concurrent_sweeping_time_secs;
|
||||
// Between end of most recent STW MSC and start of next STW MSC
|
||||
double _latest_cms_msc_end_to_msc_start_time_secs;
|
||||
// Between end of most recent MS and start of next MS
|
||||
// This does not include any time spent during a concurrent
|
||||
// collection.
|
||||
double _latest_cms_ms_end_to_ms_start;
|
||||
// Between start and end of the initial mark of the most recent
|
||||
// concurrent collection.
|
||||
double _latest_cms_initial_mark_start_to_end_time_secs;
|
||||
// Between start and end of the remark phase of the most recent
|
||||
// concurrent collection
|
||||
double _latest_cms_remark_start_to_end_time_secs;
|
||||
// Between start and end of the most recent MS STW marking phase
|
||||
double _latest_cms_ms_marking_start_to_end_time_secs;
|
||||
|
||||
// Pause time timers
|
||||
static elapsedTimer _STW_timer;
|
||||
// Concurrent collection timer. Used for total of all concurrent phases
|
||||
// during 1 collection cycle.
|
||||
static elapsedTimer _concurrent_timer;
|
||||
|
||||
// When the size of the generation is changed, the size
|
||||
// of the change will rounded up or down (depending on the
|
||||
// type of change) by this value.
|
||||
size_t _generation_alignment;
|
||||
|
||||
// If this variable is true, the size of the young generation
|
||||
// may be changed in order to reduce the pause(s) of the
|
||||
// collection of the tenured generation in order to meet the
|
||||
// pause time goal. It is common to change the size of the
|
||||
// tenured generation in order to meet the pause time goal
|
||||
// for the tenured generation. With the CMS collector for
|
||||
// the tenured generation, the size of the young generation
|
||||
// can have an significant affect on the pause times for collecting the
|
||||
// tenured generation.
|
||||
// This is a duplicate of a variable in PSAdaptiveSizePolicy. It
|
||||
// is duplicated because it is not clear that it is general enough
|
||||
// to go into AdaptiveSizePolicy.
|
||||
int _change_young_gen_for_maj_pauses;
|
||||
|
||||
// Variable that is set to true after a collection.
|
||||
bool _first_after_collection;
|
||||
|
||||
// Fraction of collections that are of each type
|
||||
double concurrent_fraction() const;
|
||||
double STW_msc_fraction() const;
|
||||
double STW_ms_fraction() const;
|
||||
|
||||
// This call cannot be put into the epilogue as long as some
|
||||
// of the counters can be set during concurrent phases.
|
||||
virtual void clear_generation_free_space_flags();
|
||||
|
||||
void set_first_after_collection() { _first_after_collection = true; }
|
||||
|
||||
protected:
|
||||
// Average of the sum of the concurrent times for
|
||||
// one collection in seconds.
|
||||
AdaptiveWeightedAverage* _avg_concurrent_time;
|
||||
// Average time between concurrent collections in seconds.
|
||||
AdaptiveWeightedAverage* _avg_concurrent_interval;
|
||||
// Average cost of the concurrent part of a collection
|
||||
// in seconds.
|
||||
AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
|
||||
|
||||
// Average of the initial pause of a concurrent collection in seconds.
|
||||
AdaptivePaddedAverage* _avg_initial_pause;
|
||||
// Average of the remark pause of a concurrent collection in seconds.
|
||||
AdaptivePaddedAverage* _avg_remark_pause;
|
||||
|
||||
// Average of the stop-the-world (STW) (initial mark + remark)
|
||||
// times in seconds for concurrent collections.
|
||||
AdaptiveWeightedAverage* _avg_cms_STW_time;
|
||||
// Average of the STW collection cost for concurrent collections.
|
||||
AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;
|
||||
|
||||
// Average of the bytes free at the start of the sweep.
|
||||
AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
|
||||
// Average of the bytes free at the end of the collection.
|
||||
AdaptiveWeightedAverage* _avg_cms_free;
|
||||
// Average of the bytes promoted between cms collections.
|
||||
AdaptiveWeightedAverage* _avg_cms_promo;
|
||||
|
||||
// stop-the-world (STW) mark-sweep-compact
|
||||
// Average of the pause time in seconds for STW mark-sweep-compact
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_msc_pause;
|
||||
// Average of the interval in seconds between STW mark-sweep-compact
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_msc_interval;
|
||||
// Average of the collection costs for STW mark-sweep-compact
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_msc_gc_cost;
|
||||
|
||||
// Averages for mark-sweep collections.
|
||||
// The collection may have started as a background collection
|
||||
// that completes in a stop-the-world (STW) collection.
|
||||
// Average of the pause time in seconds for mark-sweep
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_ms_pause;
|
||||
// Average of the interval in seconds between mark-sweep
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_ms_interval;
|
||||
// Average of the collection costs for mark-sweep
|
||||
// collections.
|
||||
AdaptiveWeightedAverage* _avg_ms_gc_cost;
|
||||
|
||||
// These variables contain a linear fit of
|
||||
// a generation size as the independent variable
|
||||
// and a pause time as the dependent variable.
|
||||
// For example _remark_pause_old_estimator
|
||||
// is a fit of the old generation size as the
|
||||
// independent variable and the remark pause
|
||||
// as the dependent variable.
|
||||
// remark pause time vs. cms gen size
|
||||
LinearLeastSquareFit* _remark_pause_old_estimator;
|
||||
// initial pause time vs. cms gen size
|
||||
LinearLeastSquareFit* _initial_pause_old_estimator;
|
||||
// remark pause time vs. young gen size
|
||||
LinearLeastSquareFit* _remark_pause_young_estimator;
|
||||
// initial pause time vs. young gen size
|
||||
LinearLeastSquareFit* _initial_pause_young_estimator;
|
||||
|
||||
// Accessors
|
||||
int processor_count() const { return _processor_count; }
|
||||
int concurrent_processor_count() const { return _concurrent_processor_count; }
|
||||
|
||||
AdaptiveWeightedAverage* avg_concurrent_time() const {
|
||||
return _avg_concurrent_time;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_concurrent_interval() const {
|
||||
return _avg_concurrent_interval;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
|
||||
return _avg_concurrent_gc_cost;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_cms_STW_time() const {
|
||||
return _avg_cms_STW_time;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
|
||||
return _avg_cms_STW_gc_cost;
|
||||
}
|
||||
|
||||
AdaptivePaddedAverage* avg_initial_pause() const {
|
||||
return _avg_initial_pause;
|
||||
}
|
||||
|
||||
AdaptivePaddedAverage* avg_remark_pause() const {
|
||||
return _avg_remark_pause;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_cms_free() const {
|
||||
return _avg_cms_free;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
|
||||
return _avg_cms_free_at_sweep;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_msc_pause() const {
|
||||
return _avg_msc_pause;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_msc_interval() const {
|
||||
return _avg_msc_interval;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_msc_gc_cost() const {
|
||||
return _avg_msc_gc_cost;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_ms_pause() const {
|
||||
return _avg_ms_pause;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_ms_interval() const {
|
||||
return _avg_ms_interval;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_ms_gc_cost() const {
|
||||
return _avg_ms_gc_cost;
|
||||
}
|
||||
|
||||
LinearLeastSquareFit* remark_pause_old_estimator() {
|
||||
return _remark_pause_old_estimator;
|
||||
}
|
||||
LinearLeastSquareFit* initial_pause_old_estimator() {
|
||||
return _initial_pause_old_estimator;
|
||||
}
|
||||
LinearLeastSquareFit* remark_pause_young_estimator() {
|
||||
return _remark_pause_young_estimator;
|
||||
}
|
||||
LinearLeastSquareFit* initial_pause_young_estimator() {
|
||||
return _initial_pause_young_estimator;
|
||||
}
|
||||
|
||||
// These *slope() methods return the slope
|
||||
// m for the linear fit of an independent
|
||||
// variable vs. a dependent variable. For
|
||||
// example
|
||||
// remark_pause = m * old_generation_size + c
|
||||
// These may be used to determine if an
|
||||
// adjustment should be made to achieve a goal.
|
||||
// For example, if remark_pause_old_slope() is
|
||||
// positive, a reduction of the old generation
|
||||
// size has on average resulted in the reduction
|
||||
// of the remark pause.
|
||||
float remark_pause_old_slope() {
|
||||
return _remark_pause_old_estimator->slope();
|
||||
}
|
||||
|
||||
float initial_pause_old_slope() {
|
||||
return _initial_pause_old_estimator->slope();
|
||||
}
|
||||
|
||||
float remark_pause_young_slope() {
|
||||
return _remark_pause_young_estimator->slope();
|
||||
}
|
||||
|
||||
float initial_pause_young_slope() {
|
||||
return _initial_pause_young_estimator->slope();
|
||||
}
|
||||
|
||||
// Update estimators
|
||||
void update_minor_pause_old_estimator(double minor_pause_in_ms);
|
||||
|
||||
// Fraction of processors used by the concurrent phases.
|
||||
double concurrent_processor_fraction();
|
||||
|
||||
// Returns the total times for the concurrent part of the
|
||||
// latest collection in seconds.
|
||||
double concurrent_collection_time();
|
||||
|
||||
// Return the total times for the concurrent part of the
|
||||
// latest collection in seconds where the times of the various
|
||||
// concurrent phases are scaled by the processor fraction used
|
||||
// during the phase.
|
||||
double scaled_concurrent_collection_time();
|
||||
|
||||
// Dimensionless concurrent GC cost for all the concurrent phases.
|
||||
double concurrent_collection_cost(double interval_in_seconds);
|
||||
|
||||
// Dimensionless GC cost
|
||||
double collection_cost(double pause_in_seconds, double interval_in_seconds);
|
||||
|
||||
virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }
|
||||
|
||||
virtual double time_since_major_gc() const;
|
||||
|
||||
// This returns the maximum average for the concurrent, ms, and
|
||||
// msc collections. This is meant to be used for the calculation
|
||||
// of the decayed major gc cost and is not in general the
|
||||
// average of all the different types of major collections.
|
||||
virtual double major_gc_interval_average_for_decay() const;
|
||||
|
||||
public:
|
||||
CMSAdaptiveSizePolicy(size_t init_eden_size,
|
||||
size_t init_promo_size,
|
||||
size_t init_survivor_size,
|
||||
double max_gc_minor_pause_sec,
|
||||
double max_gc_pause_sec,
|
||||
uint gc_cost_ratio);
|
||||
|
||||
// The timers for the stop-the-world phases measure a total
|
||||
// stop-the-world time. The timer is started and stopped
|
||||
// for each phase but is only reset after the final checkpoint.
|
||||
void checkpoint_roots_initial_begin();
|
||||
void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
|
||||
void checkpoint_roots_final_begin();
|
||||
void checkpoint_roots_final_end(GCCause::Cause gc_cause);
|
||||
|
||||
// Methods for gathering information about the
|
||||
// concurrent marking phase of the collection.
|
||||
// Records the mutator times and
|
||||
// resets the concurrent timer.
|
||||
void concurrent_marking_begin();
|
||||
// Resets concurrent phase timer in the begin methods and
|
||||
// saves the time for a phase in the end methods.
|
||||
void concurrent_marking_end();
|
||||
void concurrent_sweeping_begin();
|
||||
void concurrent_sweeping_end();
|
||||
// Similar to the above (e.g., concurrent_marking_end()) and
|
||||
// is used for both the precleaning an abortable precleaning
|
||||
// phases.
|
||||
void concurrent_precleaning_begin();
|
||||
void concurrent_precleaning_end();
|
||||
// Stops the concurrent phases time. Gathers
|
||||
// information and resets the timer.
|
||||
void concurrent_phases_end(GCCause::Cause gc_cause,
|
||||
size_t cur_eden,
|
||||
size_t cur_promo);
|
||||
|
||||
// Methods for gather information about STW Mark-Sweep-Compact
|
||||
void msc_collection_begin();
|
||||
void msc_collection_end(GCCause::Cause gc_cause);
|
||||
|
||||
// Methods for gather information about Mark-Sweep done
|
||||
// in the foreground.
|
||||
void ms_collection_begin();
|
||||
void ms_collection_end(GCCause::Cause gc_cause);
|
||||
|
||||
// Cost for a mark-sweep tenured gen collection done in the foreground
|
||||
double ms_gc_cost() const {
|
||||
return MAX2(0.0F, _avg_ms_gc_cost->average());
|
||||
}
|
||||
|
||||
// Cost of collecting the tenured generation. Includes
|
||||
// concurrent collection and STW collection costs
|
||||
double cms_gc_cost() const;
|
||||
|
||||
// Cost of STW mark-sweep-compact tenured gen collection.
|
||||
double msc_gc_cost() const {
|
||||
return MAX2(0.0F, _avg_msc_gc_cost->average());
|
||||
}
|
||||
|
||||
//
|
||||
double compacting_gc_cost() const {
|
||||
double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
|
||||
assert(result >= 0.0, "Both minor and major costs are non-negative");
|
||||
return result;
|
||||
}
|
||||
|
||||
// Restarts the concurrent phases timer.
|
||||
void concurrent_phases_resume();
|
||||
|
||||
// Time beginning and end of the marking phase for
|
||||
// a synchronous MS collection. A MS collection
|
||||
// that finishes in the foreground can have started
|
||||
// in the background. These methods capture the
|
||||
// completion of the marking (after the initial
|
||||
// marking) that is done in the foreground.
|
||||
void ms_collection_marking_begin();
|
||||
void ms_collection_marking_end(GCCause::Cause gc_cause);
|
||||
|
||||
static elapsedTimer* concurrent_timer_ptr() {
|
||||
return &_concurrent_timer;
|
||||
}
|
||||
|
||||
AdaptiveWeightedAverage* avg_cms_promo() const {
|
||||
return _avg_cms_promo;
|
||||
}
|
||||
|
||||
int change_young_gen_for_maj_pauses() {
|
||||
return _change_young_gen_for_maj_pauses;
|
||||
}
|
||||
void set_change_young_gen_for_maj_pauses(int v) {
|
||||
_change_young_gen_for_maj_pauses = v;
|
||||
}
|
||||
|
||||
void clear_internal_time_intervals();
|
||||
|
||||
|
||||
// Either calculated_promo_size_in_bytes() or promo_size()
|
||||
// should be deleted.
|
||||
size_t promo_size() { return _promo_size; }
|
||||
void set_promo_size(size_t v) { _promo_size = v; }
|
||||
|
||||
// Cost of GC for all types of collections.
|
||||
virtual double gc_cost() const;
|
||||
|
||||
size_t generation_alignment() { return _generation_alignment; }
|
||||
|
||||
virtual void compute_eden_space_size(size_t cur_eden,
|
||||
size_t max_eden_size);
|
||||
// Calculates new survivor space size; returns a new tenuring threshold
|
||||
// value. Stores new survivor size in _survivor_size.
|
||||
virtual uint compute_survivor_space_size_and_threshold(
|
||||
bool is_survivor_overflow,
|
||||
uint tenuring_threshold,
|
||||
size_t survivor_limit);
|
||||
|
||||
virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
|
||||
size_t max_tenured_available,
|
||||
size_t cur_eden);
|
||||
|
||||
size_t eden_decrement_aligned_down(size_t cur_eden);
|
||||
size_t eden_increment_aligned_up(size_t cur_eden);
|
||||
|
||||
size_t adjust_eden_for_pause_time(size_t cur_eden);
|
||||
size_t adjust_eden_for_throughput(size_t cur_eden);
|
||||
size_t adjust_eden_for_footprint(size_t cur_eden);
|
||||
|
||||
size_t promo_decrement_aligned_down(size_t cur_promo);
|
||||
size_t promo_increment_aligned_up(size_t cur_promo);
|
||||
|
||||
size_t adjust_promo_for_pause_time(size_t cur_promo);
|
||||
size_t adjust_promo_for_throughput(size_t cur_promo);
|
||||
size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);
|
||||
|
||||
// Scale down the input size by the ratio of the cost to collect the
|
||||
// generation to the total GC cost.
|
||||
size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);
|
||||
|
||||
// Return the value and clear it.
|
||||
bool get_and_clear_first_after_collection();
|
||||
|
||||
// Printing support
|
||||
virtual bool print_adaptive_size_policy_on(outputStream* st) const;
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
|
@ -23,9 +23,8 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
|
||||
#include "gc_implementation/parNew/parNewGeneration.hpp"
|
||||
#include "gc_implementation/shared/gcPolicyCounters.hpp"
|
||||
#include "gc_implementation/shared/vmGCOperations.hpp"
|
||||
@ -57,25 +56,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
|
||||
if (_generations == NULL)
|
||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
||||
|
||||
if (UseParNewGC) {
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
_generations[0] = new GenerationSpec(Generation::ASParNew,
|
||||
_initial_young_size, _max_young_size);
|
||||
} else {
|
||||
_generations[0] = new GenerationSpec(Generation::ParNew,
|
||||
_initial_young_size, _max_young_size);
|
||||
}
|
||||
} else {
|
||||
_generations[0] = new GenerationSpec(Generation::DefNew,
|
||||
_initial_young_size, _max_young_size);
|
||||
}
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
_generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
|
||||
_initial_old_size, _max_old_size);
|
||||
} else {
|
||||
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
|
||||
_initial_old_size, _max_old_size);
|
||||
}
|
||||
Generation::Name yg_name =
|
||||
UseParNewGC ? Generation::ParNew : Generation::DefNew;
|
||||
_generations[0] = new GenerationSpec(yg_name, _initial_young_size,
|
||||
_max_young_size);
|
||||
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
|
||||
_initial_old_size, _max_old_size);
|
||||
|
||||
if (_generations[0] == NULL || _generations[1] == NULL) {
|
||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
||||
@ -85,14 +71,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
                                                        size_t init_promo_size,
                                                        size_t init_survivor_size) {
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
                                           init_promo_size,
                                           init_survivor_size,
                                           max_gc_minor_pause_sec,
                                           max_gc_pause_sec,
                                           GCTimeRatio);
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@ -110,22 +94,3 @@ bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
|
||||
{
|
||||
return CMSIncrementalMode;
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// ASConcurrentMarkSweepPolicy methods
|
||||
//
|
||||
|
||||
void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
|
||||
|
||||
assert(size_policy() != NULL, "A size policy is required");
|
||||
// initialize the policy counters - 2 collectors, 3 generations
|
||||
if (UseParNewGC) {
|
||||
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
|
||||
size_policy());
|
||||
}
|
||||
else {
|
||||
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
|
||||
size_policy());
|
||||
}
|
||||
}
|
||||
|
@ -47,19 +47,4 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
|
||||
virtual bool has_soft_ended_eden();
|
||||
};
|
||||
|
||||
class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
|
||||
public:
|
||||
|
||||
// Initialize the jstat counters. This method requires a
|
||||
// size policy. The size policy is expected to be created
|
||||
// after the generations are fully initialized so the
|
||||
// initialization of the counters need to be done post
|
||||
// the initialization of the generations.
|
||||
void initialize_gc_policy_counters();
|
||||
|
||||
virtual CollectorPolicy::Name kind() {
|
||||
return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
|
||||
|
@ -1,303 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
|
||||
CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
|
||||
int collectors,
|
||||
int generations,
|
||||
AdaptiveSizePolicy* size_policy_arg)
|
||||
: GCAdaptivePolicyCounters(name_arg,
|
||||
collectors,
|
||||
generations,
|
||||
size_policy_arg) {
|
||||
if (UsePerfData) {
|
||||
EXCEPTION_MARK;
|
||||
ResourceMark rm;
|
||||
|
||||
const char* cname =
|
||||
PerfDataManager::counter_name(name_space(), "cmsCapacity");
|
||||
_cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Bytes, (jlong) OldSize, CHECK);
|
||||
#ifdef NOT_PRODUCT
|
||||
cname =
|
||||
PerfDataManager::counter_name(name_space(), "initialPause");
|
||||
_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "remarkPause");
|
||||
_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
|
||||
CHECK);
|
||||
#endif
|
||||
cname =
|
||||
PerfDataManager::counter_name(name_space(), "avgInitialPause");
|
||||
_avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_initial_pause()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
|
||||
_avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_remark_pause()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
|
||||
_avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
|
||||
_avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_cms_STW_time()->average(),
|
||||
CHECK);
|
||||
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
|
||||
_avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_concurrent_time()->average(),
|
||||
CHECK);
|
||||
|
||||
cname =
|
||||
PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
|
||||
_avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_concurrent_interval()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
|
||||
_avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
|
||||
_avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
|
||||
_avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_cms_free()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
|
||||
_avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_cms_promo()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
|
||||
_avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_msc_pause()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
|
||||
_avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_msc_interval()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
|
||||
_msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
|
||||
_avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_ms_pause()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
|
||||
_avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_ms_interval()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "msGcCost");
|
||||
_ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
|
||||
cname,
|
||||
PerfData::U_Ticks,
|
||||
(jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
|
||||
CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
|
||||
_major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
|
||||
PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
|
||||
_promoted_avg_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
|
||||
_promoted_avg_dev_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) 0 , CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
|
||||
_promoted_padded_avg_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(),
|
||||
"changeYoungGenForMajPauses");
|
||||
_change_young_gen_for_maj_pauses_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
|
||||
(jlong)0, CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
|
||||
_remark_pause_old_slope_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);
|
||||
|
||||
cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
|
||||
_initial_pause_old_slope_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
|
||||
|
||||
cname =
|
||||
PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
|
||||
_remark_pause_young_slope_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
|
||||
|
||||
cname =
|
||||
PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
|
||||
_initial_pause_young_slope_counter =
|
||||
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
|
||||
(jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);
|
||||
|
||||
|
||||
}
|
||||
assert(size_policy()->is_gc_cms_adaptive_size_policy(),
|
||||
"Wrong type of size policy");
|
||||
}
|
||||
|
||||
void CMSGCAdaptivePolicyCounters::update_counters() {
|
||||
if (UsePerfData) {
|
||||
GCAdaptivePolicyCounters::update_counters_from_policy();
|
||||
update_counters_from_policy();
|
||||
}
|
||||
}
|
||||
|
||||
void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
|
||||
if (UsePerfData) {
|
||||
update_counters();
|
||||
update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
|
||||
update_avg_promoted_avg(gc_stats);
|
||||
update_avg_promoted_dev(gc_stats);
|
||||
update_avg_promoted_padded_avg(gc_stats);
|
||||
}
|
||||
}
|
||||
|
||||
void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
|
||||
if (UsePerfData && (cms_size_policy() != NULL)) {
|
||||
|
||||
GCAdaptivePolicyCounters::update_counters_from_policy();
|
||||
|
||||
update_major_gc_cost_counter();
|
||||
update_mutator_cost_counter();
|
||||
|
||||
update_eden_size();
|
||||
update_promo_size();
|
||||
|
||||
// If these updates from the last_sample() work,
|
||||
// revise the update methods for these counters
|
||||
// (both here and in PS).
|
||||
update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());
|
||||
|
||||
update_avg_concurrent_time_counter();
|
||||
update_avg_concurrent_interval_counter();
|
||||
update_avg_concurrent_gc_cost_counter();
|
||||
#ifdef NOT_PRODUCT
|
||||
update_initial_pause_counter();
|
||||
update_remark_pause_counter();
|
||||
#endif
|
||||
update_avg_initial_pause_counter();
|
||||
update_avg_remark_pause_counter();
|
||||
|
||||
update_avg_cms_STW_time_counter();
|
||||
update_avg_cms_STW_gc_cost_counter();
|
||||
|
||||
update_avg_cms_free_counter();
|
||||
update_avg_cms_free_at_sweep_counter();
|
||||
update_avg_cms_promo_counter();
|
||||
|
||||
update_avg_msc_pause_counter();
|
||||
update_avg_msc_interval_counter();
|
||||
update_msc_gc_cost_counter();
|
||||
|
||||
update_avg_ms_pause_counter();
|
||||
update_avg_ms_interval_counter();
|
||||
update_ms_gc_cost_counter();
|
||||
|
||||
update_avg_old_live_counter();
|
||||
|
||||
update_survivor_size_counters();
|
||||
update_avg_survived_avg_counters();
|
||||
update_avg_survived_dev_counters();
|
||||
|
||||
update_decrement_tenuring_threshold_for_gc_cost();
|
||||
update_increment_tenuring_threshold_for_gc_cost();
|
||||
update_decrement_tenuring_threshold_for_survivor_limit();
|
||||
|
||||
update_change_young_gen_for_maj_pauses();
|
||||
|
||||
update_major_collection_slope_counter();
|
||||
update_remark_pause_old_slope_counter();
|
||||
update_initial_pause_old_slope_counter();
|
||||
update_remark_pause_young_slope_counter();
|
||||
update_initial_pause_young_slope_counter();
|
||||
|
||||
update_decide_at_full_gc_counter();
|
||||
}
|
||||
}
|
@ -1,308 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
|
||||
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
|
||||
#include "gc_implementation/shared/gcStats.hpp"
|
||||
#include "runtime/perfData.hpp"
|
||||
|
||||
// CMSGCAdaptivePolicyCounters is a holder class for performance counters
|
||||
// that track the data and decisions for the ergonomics policy for the
|
||||
// concurrent mark sweep collector
|
||||
|
||||
class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
|
||||
  friend class VMStructs;

 private:

  // Capacity of tenured generation recorded at the end of
  // any collection.
  PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity

  // Average stop-the-world pause time for both initial and
  // remark pauses sampled at the end of the checkpointRootsFinalWork.
  PerfVariable* _avg_cms_STW_time_counter;
  // Average stop-the-world (STW) GC cost for the STW pause time
  // _avg_cms_STW_time_counter.
  PerfVariable* _avg_cms_STW_gc_cost_counter;

#ifdef NOT_PRODUCT
  // These are useful to see how the most recent values of these
  // counters compare to their respective averages but
  // do not control behavior.
  PerfVariable* _initial_pause_counter;
  PerfVariable* _remark_pause_counter;
#endif

  // Average of the initial marking pause for a concurrent collection.
  PerfVariable* _avg_initial_pause_counter;
  // Average of the remark pause for a concurrent collection.
  PerfVariable* _avg_remark_pause_counter;

  // Average for the sum of all the concurrent times per collection.
  PerfVariable* _avg_concurrent_time_counter;
  // Average for the time between the most recent end of a
  // concurrent collection and the beginning of the next
  // concurrent collection.
  PerfVariable* _avg_concurrent_interval_counter;
  // Average of the concurrent GC costs based on _avg_concurrent_time_counter
  // and _avg_concurrent_interval_counter.
  PerfVariable* _avg_concurrent_gc_cost_counter;

  // Average of the free space in the tenured generation at the
  // end of the sweep of the tenured generation.
  PerfVariable* _avg_cms_free_counter;
  // Average of the free space in the tenured generation at the
  // start of the sweep of the tenured generation.
  PerfVariable* _avg_cms_free_at_sweep_counter;
  // Average of the free space in the tenured generation at the
  // after any resizing of the tenured generation at the end
  // of a collection of the tenured generation.
  PerfVariable* _avg_cms_promo_counter;

  // Average of the mark-sweep-compact (MSC) pause time for a collection
  // of the tenured generation.
  PerfVariable* _avg_msc_pause_counter;
  // Average for the time between the most recent end of a
  // MSC collection and the beginning of the next MSC collection.
  PerfVariable* _avg_msc_interval_counter;
  // Average for the GC cost of a MSC collection based on
  // _avg_msc_pause_counter and _avg_msc_interval_counter.
  PerfVariable* _msc_gc_cost_counter;

  // Average of the mark-sweep (MS) pause time for a collection
  // of the tenured generation.
  PerfVariable* _avg_ms_pause_counter;
  // Average for the time between the most recent end of a
  // MS collection and the beginning of the next MS collection.
  PerfVariable* _avg_ms_interval_counter;
  // Average for the GC cost of a MS collection based on
  // _avg_ms_pause_counter and _avg_ms_interval_counter.
  PerfVariable* _ms_gc_cost_counter;

  // Average of the bytes promoted per minor collection.
  PerfVariable* _promoted_avg_counter;
  // Average of the deviation of the promoted average.
  PerfVariable* _promoted_avg_dev_counter;
  // Padded average of the bytes promoted per minor collection.
  PerfVariable* _promoted_padded_avg_counter;

  // See description of the _change_young_gen_for_maj_pauses
  // variable recently in cmsAdaptiveSizePolicy.hpp.
  PerfVariable* _change_young_gen_for_maj_pauses_counter;

  // See descriptions of _remark_pause_old_slope, _initial_pause_old_slope,
  // etc. variables recently in cmsAdaptiveSizePolicy.hpp.
  PerfVariable* _remark_pause_old_slope_counter;
  PerfVariable* _initial_pause_old_slope_counter;
  PerfVariable* _remark_pause_young_slope_counter;
  PerfVariable* _initial_pause_young_slope_counter;

  CMSAdaptiveSizePolicy* cms_size_policy() {
    assert(_size_policy->kind() ==
             AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
           "Wrong size policy");
    return (CMSAdaptiveSizePolicy*)_size_policy;
  }

  inline void update_avg_cms_STW_time_counter() {
    _avg_cms_STW_time_counter->set_value(
      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_cms_STW_gc_cost_counter() {
    _avg_cms_STW_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
  }

  inline void update_avg_initial_pause_counter() {
    _avg_initial_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
      (double) MILLIUNITS));
  }
#ifdef NOT_PRODUCT
  inline void update_avg_remark_pause_counter() {
    _avg_remark_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_remark_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_initial_pause_counter() {
    _initial_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
      (double) MILLIUNITS));
  }
#endif
  inline void update_remark_pause_counter() {
    _remark_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_remark_pause()->last_sample() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_time_counter() {
    _avg_concurrent_time_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_interval_counter() {
    _avg_concurrent_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_gc_cost_counter() {
    _avg_concurrent_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
  }

  inline void update_avg_cms_free_counter() {
    _avg_cms_free_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_free()->average());
  }

  inline void update_avg_cms_free_at_sweep_counter() {
    _avg_cms_free_at_sweep_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
  }

  inline void update_avg_cms_promo_counter() {
    _avg_cms_promo_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_promo()->average());
  }

  inline void update_avg_old_live_counter() {
    _avg_old_live_counter->set_value(
      (jlong)(cms_size_policy()->avg_old_live()->average())
    );
  }

  inline void update_avg_msc_pause_counter() {
    _avg_msc_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_msc_interval_counter() {
    _avg_msc_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_msc_gc_cost_counter() {
    _msc_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
  }

  inline void update_avg_ms_pause_counter() {
    _avg_ms_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_ms_interval_counter() {
    _avg_ms_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_ms_gc_cost_counter() {
    _ms_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
  }

  inline void update_major_gc_cost_counter() {
    _major_gc_cost_counter->set_value(
      (jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
    );
  }
  inline void update_mutator_cost_counter() {
    _mutator_cost_counter->set_value(
      (jlong)(cms_size_policy()->mutator_cost() * 100.0)
    );
  }

  inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
    _promoted_avg_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->average())
    );
  }
  inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
    _promoted_avg_dev_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->deviation())
    );
  }
  inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
    _promoted_padded_avg_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->padded_average())
    );
  }
  inline void update_remark_pause_old_slope_counter() {
    _remark_pause_old_slope_counter->set_value(
      (jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
    );
  }
  inline void update_initial_pause_old_slope_counter() {
    _initial_pause_old_slope_counter->set_value(
      (jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
    );
  }
  inline void update_remark_pause_young_slope_counter() {
    _remark_pause_young_slope_counter->set_value(
      (jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
    );
  }
  inline void update_initial_pause_young_slope_counter() {
    _initial_pause_young_slope_counter->set_value(
      (jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
    );
  }
  inline void update_change_young_gen_for_maj_pauses() {
    _change_young_gen_for_maj_pauses_counter->set_value(
      cms_size_policy()->change_young_gen_for_maj_pauses());
  }

 public:
  CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
                              AdaptiveSizePolicy* size_policy);

  // update counters
  void update_counters();
  void update_counters(CMSGCStats* gc_stats);
  void update_counters_from_policy();

  inline void update_cms_capacity_counter(size_t size_in_bytes) {
    _cms_capacity_counter->set_value(size_in_bytes);
  }

  virtual GCPolicyCounters::Name kind() const {
    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
@ -70,7 +70,6 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

@ -27,9 +27,8 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"

@ -319,27 +318,13 @@ void CMSCollector::ref_processor_init() {
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
  return gch->gen_policy()->size_policy();
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

@ -1573,11 +1558,11 @@ bool CMSCollector::shouldConcurrentCollect() {
  }

  if (MetaspaceGC::should_concurrent_collect()) {
      if (Verbose && PrintGCDetails) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
      }
      return true;
    }
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
@ -2031,11 +2016,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
|
||||
"collections passed to foreground collector", _full_gcs_since_conc_gc);
|
||||
}
|
||||
|
||||
// Sample collection interval time and reset for collection pause.
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->msc_collection_begin();
|
||||
}
|
||||
|
||||
// Temporarily widen the span of the weak reference processing to
|
||||
// the entire heap.
|
||||
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
|
||||
@ -2111,11 +2091,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
|
||||
_inter_sweep_timer.reset();
|
||||
_inter_sweep_timer.start();
|
||||
|
||||
// Sample collection pause time and reset for collection interval.
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->msc_collection_end(gch->gc_cause());
|
||||
}
|
||||
|
||||
gc_timer->register_gc_end();
|
||||
|
||||
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
|
||||
@ -2373,26 +2348,14 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
|
||||
}
|
||||
break;
|
||||
case Precleaning:
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_precleaning_begin();
|
||||
}
|
||||
// marking from roots in markFromRoots has been completed
|
||||
preclean();
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_precleaning_end();
|
||||
}
|
||||
assert(_collectorState == AbortablePreclean ||
|
||||
_collectorState == FinalMarking,
|
||||
"Collector state should have changed");
|
||||
break;
|
||||
case AbortablePreclean:
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_phases_resume();
|
||||
}
|
||||
abortable_preclean();
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_precleaning_end();
|
||||
}
|
||||
assert(_collectorState == FinalMarking, "Collector state should "
|
||||
"have changed");
|
||||
break;
|
||||
@ -2406,23 +2369,12 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
|
||||
assert(_foregroundGCShouldWait, "block post-condition");
|
||||
break;
|
||||
case Sweeping:
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_sweeping_begin();
|
||||
}
|
||||
// final marking in checkpointRootsFinal has been completed
|
||||
sweep(true);
|
||||
assert(_collectorState == Resizing, "Collector state change "
|
||||
"to Resizing must be done under the free_list_lock");
|
||||
_full_gcs_since_conc_gc = 0;
|
||||
|
||||
// Stop the timers for adaptive size policy for the concurrent phases
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_sweeping_end();
|
||||
size_policy()->concurrent_phases_end(gch->gc_cause(),
|
||||
gch->prev_gen(_cmsGen)->capacity(),
|
||||
_cmsGen->free());
|
||||
}
|
||||
|
||||
case Resizing: {
|
||||
// Sweeping has been completed...
|
||||
// At this point the background collection has completed.
|
||||
@ -2539,9 +2491,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
|
||||
const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
|
||||
NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
|
||||
true, NULL, gc_id);)
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->ms_collection_begin();
|
||||
}
|
||||
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
|
||||
|
||||
HandleMark hm; // Discard invalid handles created during verification
|
||||
@ -2633,11 +2582,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
|
||||
}
|
||||
}
|
||||
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
size_policy()->ms_collection_end(gch->gc_cause());
|
||||
}
|
||||
|
||||
if (VerifyAfterGC &&
|
||||
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
|
||||
Universe::verify();
|
||||
@ -3053,20 +2997,21 @@ void CMSCollector::verify_after_remark_work_1() {
|
||||
HandleMark hm;
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
|
||||
// Get a clear set of claim bits for the strong roots processing to work with.
|
||||
// Get a clear set of claim bits for the roots processing to work with.
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
|
||||
// Mark from roots one level into CMS
|
||||
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
|
||||
|
||||
gch->gen_process_strong_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
&notOlder,
|
||||
NULL,
|
||||
NULL); // SSS: Provide correct closure
|
||||
gch->gen_process_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
&notOlder,
|
||||
NULL,
|
||||
NULL); // SSS: Provide correct closure
|
||||
|
||||
// Now mark from the roots
|
||||
MarkFromRootsClosure markFromRootsClosure(this, _span,
|
||||
@ -3117,22 +3062,24 @@ void CMSCollector::verify_after_remark_work_2() {
|
||||
HandleMark hm;
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
|
||||
// Get a clear set of claim bits for the strong roots processing to work with.
|
||||
// Get a clear set of claim bits for the roots processing to work with.
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
|
||||
// Mark from roots one level into CMS
|
||||
MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
|
||||
markBitMap());
|
||||
KlassToOopClosure klass_closure(&notOlder);
|
||||
CLDToOopClosure cld_closure(&notOlder, true);
|
||||
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
|
||||
gch->gen_process_strong_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
&notOlder,
|
||||
NULL,
|
||||
&klass_closure);
|
||||
|
||||
gch->gen_process_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
&notOlder,
|
||||
NULL,
|
||||
&cld_closure);
|
||||
|
||||
// Now mark from the roots
|
||||
MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
|
||||
@ -3319,12 +3266,10 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
|
||||
void CMSCollector::setup_cms_unloading_and_verification_state() {
|
||||
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|
||||
|| VerifyBeforeExit;
|
||||
const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
|
||||
const int rso = SharedHeap::SO_AllCodeCache;
|
||||
|
||||
// We set the proper root for this CMS cycle here.
|
||||
if (should_unload_classes()) { // Should unload classes this cycle
|
||||
remove_root_scanning_option(SharedHeap::SO_AllClasses);
|
||||
add_root_scanning_option(SharedHeap::SO_SystemClasses);
|
||||
remove_root_scanning_option(rso); // Shrink the root set appropriately
|
||||
set_verifying(should_verify); // Set verification state for this cycle
|
||||
return; // Nothing else needs to be done at this time
|
||||
@ -3332,8 +3277,6 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
|
||||
|
||||
// Not unloading classes this cycle
|
||||
assert(!should_unload_classes(), "Inconsistency!");
|
||||
remove_root_scanning_option(SharedHeap::SO_SystemClasses);
|
||||
add_root_scanning_option(SharedHeap::SO_AllClasses);
|
||||
|
||||
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
|
||||
// Include symbols, strings and code cache elements to prevent their resurrection.
|
||||
@ -3687,9 +3630,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
|
||||
|
||||
NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
|
||||
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->checkpoint_roots_initial_begin();
|
||||
}
|
||||
|
||||
// Reset all the PLAB chunk arrays if necessary.
|
||||
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
|
||||
@ -3744,15 +3684,16 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
|
||||
gch->set_par_threads(0);
|
||||
} else {
|
||||
// The serial version.
|
||||
KlassToOopClosure klass_closure(&notOlder);
|
||||
CLDToOopClosure cld_closure(&notOlder, true);
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
|
||||
gch->gen_process_strong_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
&notOlder,
|
||||
NULL,
|
||||
&klass_closure);
|
||||
gch->gen_process_roots(_cmsGen->level(),
|
||||
true, // younger gens are roots
|
||||
true, // activate StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
&notOlder,
|
||||
NULL,
|
||||
&cld_closure);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3769,9 +3710,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
|
||||
// Save the end of the used_region of the constituent generations
|
||||
// to be used to limit the extent of sweep in each generation.
|
||||
save_sweep_limits();
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
|
||||
}
|
||||
verify_overflow_empty();
|
||||
}
|
||||
|
||||
@ -3788,15 +3726,6 @@ bool CMSCollector::markFromRoots(bool asynch) {
|
||||
|
||||
bool res;
|
||||
if (asynch) {
|
||||
|
||||
// Start the timers for adaptive size policy for the concurrent phases
|
||||
// Do it here so that the foreground MS can use the concurrent
|
||||
// timer since a foreground MS might have the sweep done concurrently
|
||||
// or STW.
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_marking_begin();
|
||||
}
|
||||
|
||||
// Weak ref discovery note: We may be discovering weak
|
||||
// refs in this generation concurrent (but interleaved) with
|
||||
// weak ref discovery by a younger generation collector.
|
||||
@ -3814,22 +3743,12 @@ bool CMSCollector::markFromRoots(bool asynch) {
|
||||
gclog_or_tty->print_cr("bailing out to foreground collection");
|
||||
}
|
||||
}
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->concurrent_marking_end();
|
||||
}
|
||||
} else {
|
||||
assert(SafepointSynchronize::is_at_safepoint(),
|
||||
"inconsistent with asynch == false");
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->ms_collection_marking_begin();
|
||||
}
|
||||
// already have locks
|
||||
res = markFromRootsWork(asynch);
|
||||
_collectorState = FinalMarking;
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
size_policy()->ms_collection_marking_end(gch->gc_cause());
|
||||
}
|
||||
}
|
||||
verify_overflow_empty();
|
||||
return res;
|
||||
@ -4705,8 +4624,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||
|
||||
if (clean_survivor) { // preclean the active survivor space(s)
|
||||
assert(_young_gen->kind() == Generation::DefNew ||
|
||||
_young_gen->kind() == Generation::ParNew ||
|
||||
_young_gen->kind() == Generation::ASParNew,
|
||||
_young_gen->kind() == Generation::ParNew,
|
||||
"incorrect type for cast");
|
||||
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
|
||||
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
|
||||
@ -5077,10 +4995,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
|
||||
assert(haveFreelistLocks(), "must have free list locks");
|
||||
assert_lock_strong(bitMapLock());
|
||||
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->checkpoint_roots_final_begin();
|
||||
}
|
||||
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
|
||||
@ -5214,9 +5128,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
|
||||
"Should be clear by end of the final marking");
|
||||
assert(_ct->klass_rem_set()->mod_union_is_clear(),
|
||||
"Should be clear by end of the final marking");
|
||||
if (UseAdaptiveSizePolicy) {
|
||||
size_policy()->checkpoint_roots_final_end(gch->gc_cause());
|
||||
}
|
||||
}
|
||||
|
||||
void CMSParInitialMarkTask::work(uint worker_id) {
|
||||
@ -5228,7 +5139,6 @@ void CMSParInitialMarkTask::work(uint worker_id) {
|
||||
_timer.start();
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
|
||||
KlassToOopClosure klass_closure(&par_mri_cl);
|
||||
|
||||
// ---------- young gen roots --------------
|
||||
{
|
||||
@ -5244,13 +5154,17 @@ void CMSParInitialMarkTask::work(uint worker_id) {
|
||||
// ---------- remaining roots --------------
|
||||
_timer.reset();
|
||||
_timer.start();
|
||||
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
|
||||
false, // yg was scanned above
|
||||
false, // this is parallel code
|
||||
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
|
||||
&par_mri_cl,
|
||||
NULL,
|
||||
&klass_closure);
|
||||
|
||||
CLDToOopClosure cld_closure(&par_mri_cl, true);
|
||||
|
||||
gch->gen_process_roots(_collector->_cmsGen->level(),
|
||||
false, // yg was scanned above
|
||||
false, // this is parallel code
|
||||
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
|
||||
_collector->should_unload_classes(),
|
||||
&par_mri_cl,
|
||||
NULL,
|
||||
&cld_closure);
|
||||
assert(_collector->should_unload_classes()
|
||||
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
|
||||
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
|
||||
@ -5379,13 +5293,15 @@ void CMSParRemarkTask::work(uint worker_id) {
|
||||
// ---------- remaining roots --------------
|
||||
_timer.reset();
|
||||
_timer.start();
|
||||
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
|
||||
false, // yg was scanned above
|
||||
false, // this is parallel code
|
||||
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
|
||||
&par_mrias_cl,
|
||||
NULL,
|
||||
NULL); // The dirty klasses will be handled below
|
||||
gch->gen_process_roots(_collector->_cmsGen->level(),
|
||||
false, // yg was scanned above
|
||||
false, // this is parallel code
|
||||
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
|
||||
_collector->should_unload_classes(),
|
||||
&par_mrias_cl,
|
||||
NULL,
|
||||
NULL); // The dirty klasses will be handled below
|
||||
|
||||
assert(_collector->should_unload_classes()
|
||||
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
|
||||
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
|
||||
@ -5440,7 +5356,7 @@ void CMSParRemarkTask::work(uint worker_id) {
|
||||
// We might have added oops to ClassLoaderData::_handles during the
|
||||
// concurrent marking phase. These oops point to newly allocated objects
|
||||
// that are guaranteed to be kept alive. Either by the direct allocation
|
||||
// code, or when the young collector processes the strong roots. Hence,
|
||||
// code, or when the young collector processes the roots. Hence,
|
||||
// we don't have to revisit the _handles block during the remark phase.
|
||||
|
||||
// ---------- rescan dirty cards ------------
|
||||
@ -5862,7 +5778,7 @@ void CMSCollector::do_remark_parallel() {
|
||||
cms_space,
|
||||
n_workers, workers, task_queues());
|
||||
|
||||
// Set up for parallel process_strong_roots work.
|
||||
// Set up for parallel process_roots work.
|
||||
gch->set_par_threads(n_workers);
|
||||
// We won't be iterating over the cards in the card table updating
|
||||
// the younger_gen cards, so we shouldn't call the following else
|
||||
@ -5871,7 +5787,7 @@ void CMSCollector::do_remark_parallel() {
|
||||
// gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
|
||||
|
||||
// The young gen rescan work will not be done as part of
|
||||
// process_strong_roots (which currently doesn't knw how to
|
||||
// process_roots (which currently doesn't know how to
|
||||
// parallelize such a scan), but rather will be broken up into
|
||||
// a set of parallel tasks (via the sampling that the [abortable]
|
||||
// preclean phase did of EdenSpace, plus the [two] tasks of
|
||||
@ -5968,13 +5884,15 @@ void CMSCollector::do_remark_non_parallel() {
|
||||
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
gch->gen_process_strong_roots(_cmsGen->level(),
|
||||
true, // younger gens as roots
|
||||
false, // use the local StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
&mrias_cl,
|
||||
NULL,
|
||||
NULL); // The dirty klasses will be handled below
|
||||
|
||||
gch->gen_process_roots(_cmsGen->level(),
|
||||
true, // younger gens as roots
|
||||
false, // use the local StrongRootsScope
|
||||
SharedHeap::ScanningOption(roots_scanning_options()),
|
||||
should_unload_classes(),
|
||||
&mrias_cl,
|
||||
NULL,
|
||||
NULL); // The dirty klasses will be handled below
|
||||
|
||||
assert(should_unload_classes()
|
||||
|| (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
|
||||
@ -6014,7 +5932,7 @@ void CMSCollector::do_remark_non_parallel() {
|
||||
// We might have added oops to ClassLoaderData::_handles during the
|
||||
// concurrent marking phase. These oops point to newly allocated objects
|
||||
// that are guaranteed to be kept alive. Either by the direct allocation
|
||||
// code, or when the young collector processes the strong roots. Hence,
|
||||
// code, or when the young collector processes the roots. Hence,
|
||||
// we don't have to revisit the _handles block during the remark phase.
|
||||
|
||||
verify_work_stacks_empty();
|
||||
@ -6264,15 +6182,14 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
|
||||
// Clean up unreferenced symbols in symbol table.
|
||||
SymbolTable::unlink();
|
||||
}
|
||||
|
||||
{
|
||||
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
|
||||
// Delete entries for dead interned strings.
|
||||
StringTable::unlink(&_is_alive_closure);
|
||||
}
|
||||
}
|
||||
|
||||
// CMS doesn't use the StringTable as hard roots when class unloading is turned off.
|
||||
// Need to check if we really scanned the StringTable.
|
||||
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
|
||||
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
|
||||
// Delete entries for dead interned strings.
|
||||
StringTable::unlink(&_is_alive_closure);
|
||||
}
|
||||
|
||||
// Restore any preserved marks as a result of mark stack or
|
||||
// work queue overflow
|
||||
@ -6329,7 +6246,6 @@ void CMSCollector::sweep(bool asynch) {
|
||||
|
||||
_inter_sweep_timer.stop();
|
||||
_inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
|
||||
size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
|
||||
|
||||
assert(!_intra_sweep_timer.is_active(), "Should not be active");
|
||||
_intra_sweep_timer.reset();
|
||||
@ -6454,17 +6370,6 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
|
||||
}
|
||||
}
|
||||
|
||||
CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
|
||||
"Wrong type of heap");
|
||||
CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
|
||||
gch->gen_policy()->size_policy();
|
||||
assert(sp->is_gc_cms_adaptive_size_policy(),
|
||||
"Wrong type of size policy");
|
||||
return sp;
|
||||
}
|
||||
|
||||
void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
|
||||
@ -6540,9 +6445,6 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
|
||||
// Reset CMS data structures (for now just the marking bit map)
|
||||
// preparatory for the next cycle.
|
||||
void CMSCollector::reset(bool asynch) {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
CMSAdaptiveSizePolicy* sp = size_policy();
|
||||
AdaptiveSizePolicyOutput(sp, gch->total_collections());
|
||||
if (asynch) {
|
||||
CMSTokenSyncWithLocks ts(true, bitMapLock());
|
||||
|
||||
@ -6597,7 +6499,7 @@ void CMSCollector::reset(bool asynch) {
|
||||
// Because only the full (i.e., concurrent mode failure) collections
|
||||
// are being measured for gc overhead limits, clean the "near" flag
|
||||
// and count.
|
||||
sp->reset_gc_overhead_limit_count();
|
||||
size_policy()->reset_gc_overhead_limit_count();
|
||||
_collectorState = Idling;
|
||||
} else {
|
||||
// already have the lock
|
||||
@ -7064,7 +6966,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
|
||||
ConcurrentMarkSweepThread::desynchronize(true);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -7225,7 +7126,6 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
|
||||
ConcurrentMarkSweepThread::desynchronize(true);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -7298,7 +7198,6 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
|
||||
ConcurrentMarkSweepThread::desynchronize(true);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -7457,7 +7356,6 @@ void MarkFromRootsClosure::do_yield_work() {
|
||||
ConcurrentMarkSweepThread::desynchronize(true);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -8099,7 +7997,6 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -8780,7 +8677,6 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
|
||||
ConcurrentMarkSweepThread::desynchronize(true);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
_collector->stopTimer();
|
||||
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
|
||||
if (PrintCMSStatistics != 0) {
|
||||
_collector->incrementYields();
|
||||
}
|
||||
@ -9327,172 +9223,6 @@ bool CMSCollector::no_preserved_marks() const {
|
||||
}
|
||||
#endif
|
||||
|
||||
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
|
||||
{
|
||||
GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
|
||||
CMSAdaptiveSizePolicy* size_policy =
|
||||
(CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
|
||||
assert(size_policy->is_gc_cms_adaptive_size_policy(),
|
||||
"Wrong type for size policy");
|
||||
return size_policy;
|
||||
}
|
||||
|
||||
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
|
||||
size_t desired_promo_size) {
|
||||
if (cur_promo_size < desired_promo_size) {
|
||||
size_t expand_bytes = desired_promo_size - cur_promo_size;
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
|
||||
"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
|
||||
expand_bytes);
|
||||
}
|
||||
expand(expand_bytes,
|
||||
MinHeapDeltaBytes,
|
||||
CMSExpansionCause::_adaptive_size_policy);
|
||||
} else if (desired_promo_size < cur_promo_size) {
|
||||
size_t shrink_bytes = cur_promo_size - desired_promo_size;
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
|
||||
"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
|
||||
shrink_bytes);
|
||||
}
|
||||
shrink(shrink_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
CMSGCAdaptivePolicyCounters* counters =
|
||||
(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
|
||||
assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
|
||||
"Wrong kind of counters");
|
||||
return counters;
|
||||
}
|
||||
|
||||
|
||||
void ASConcurrentMarkSweepGeneration::update_counters() {
|
||||
if (UsePerfData) {
|
||||
_space_counters->update_all();
|
||||
_gen_counters->update_all();
|
||||
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
|
||||
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
|
||||
"Wrong gc statistics type");
|
||||
counters->update_counters(gc_stats_l);
|
||||
}
|
||||
}
|
||||
|
||||
void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
|
||||
if (UsePerfData) {
|
||||
_space_counters->update_used(used);
|
||||
_space_counters->update_capacity();
|
||||
_gen_counters->update_all();
|
||||
|
||||
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
|
||||
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
|
||||
"Wrong gc statistics type");
|
||||
counters->update_counters(gc_stats_l);
|
||||
}
|
||||
}
|
||||
|
||||
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
|
||||
assert_locked_or_safepoint(Heap_lock);
|
||||
assert_lock_strong(freelistLock());
|
||||
HeapWord* old_end = _cmsSpace->end();
|
||||
HeapWord* unallocated_start = _cmsSpace->unallocated_block();
|
||||
assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
|
||||
FreeChunk* chunk_at_end = find_chunk_at_end();
|
||||
if (chunk_at_end == NULL) {
|
||||
// No room to shrink
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("No room to shrink: old_end "
|
||||
PTR_FORMAT " unallocated_start " PTR_FORMAT
|
||||
" chunk_at_end " PTR_FORMAT,
|
||||
old_end, unallocated_start, chunk_at_end);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
|
||||
// Find the chunk at the end of the space and determine
|
||||
// how much it can be shrunk.
|
||||
size_t shrinkable_size_in_bytes = chunk_at_end->size();
|
||||
size_t aligned_shrinkable_size_in_bytes =
|
||||
align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
|
||||
assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
|
||||
"Inconsistent chunk at end of space");
|
||||
size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
|
||||
size_t word_size_before = heap_word_size(_virtual_space.committed_size());
|
||||
|
||||
// Shrink the underlying space
|
||||
_virtual_space.shrink_by(bytes);
|
||||
if (PrintGCDetails && Verbose) {
|
||||
gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
|
||||
" desired_bytes " SIZE_FORMAT
|
||||
" shrinkable_size_in_bytes " SIZE_FORMAT
|
||||
" aligned_shrinkable_size_in_bytes " SIZE_FORMAT
|
||||
" bytes " SIZE_FORMAT,
|
||||
desired_bytes, shrinkable_size_in_bytes,
|
||||
aligned_shrinkable_size_in_bytes, bytes);
|
||||
gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
|
||||
" unallocated_start " SIZE_FORMAT,
|
||||
old_end, unallocated_start);
|
||||
}
|
||||
|
||||
// If the space did shrink (shrinking is not guaranteed),
|
||||
// shrink the chunk at the end by the appropriate amount.
|
||||
if (((HeapWord*)_virtual_space.high()) < old_end) {
|
||||
size_t new_word_size =
|
||||
heap_word_size(_virtual_space.committed_size());
|
||||
|
||||
// Have to remove the chunk from the dictionary because it is changing
|
||||
// size and might be someplace elsewhere in the dictionary.
|
||||
|
||||
// Get the chunk at end, shrink it, and put it
|
||||
// back.
|
||||
_cmsSpace->removeChunkFromDictionary(chunk_at_end);
|
||||
size_t word_size_change = word_size_before - new_word_size;
|
||||
size_t chunk_at_end_old_size = chunk_at_end->size();
|
||||
assert(chunk_at_end_old_size >= word_size_change,
|
||||
"Shrink is too large");
|
||||
chunk_at_end->set_size(chunk_at_end_old_size -
|
||||
word_size_change);
|
||||
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
|
||||
word_size_change);
|
||||
|
||||
_cmsSpace->returnChunkToDictionary(chunk_at_end);
|
||||
|
||||
MemRegion mr(_cmsSpace->bottom(), new_word_size);
|
||||
_bts->resize(new_word_size); // resize the block offset shared array
|
||||
Universe::heap()->barrier_set()->resize_covered_region(mr);
|
||||
_cmsSpace->assert_locked();
|
||||
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
|
||||
|
||||
NOT_PRODUCT(_cmsSpace->dictionary()->verify());
|
||||
|
||||
// update the space and generation capacity counters
|
||||
if (UsePerfData) {
|
||||
_space_counters->update_capacity();
|
||||
_gen_counters->update_all();
|
||||
}
|
||||
|
||||
if (Verbose && PrintGCDetails) {
|
||||
size_t new_mem_size = _virtual_space.committed_size();
|
||||
size_t old_mem_size = new_mem_size + bytes;
|
||||
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
|
||||
name(), old_mem_size/K, bytes/K, new_mem_size/K);
|
||||
}
|
||||
}
|
||||
|
||||
assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
|
||||
"Inconsistency at end of space");
|
||||
assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
|
||||
"Shrinking is inconsistent");
|
||||
return;
|
||||
}
|
||||
}
|
||||
// Transfer some number of overflown objects to usual marking
|
||||
// stack. Return true if some objects were transferred.
|
||||
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "gc_implementation/shared/generationCounters.hpp"
|
||||
#include "memory/freeBlockDictionary.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/virtualspace.hpp"
|
||||
#include "services/memoryService.hpp"
|
||||
@ -52,7 +53,7 @@
|
||||
// Concurrent mode failures are currently handled by
|
||||
// means of a sliding mark-compact.
|
||||
|
||||
class CMSAdaptiveSizePolicy;
|
||||
class AdaptiveSizePolicy;
|
||||
class CMSConcMarkingTask;
|
||||
class CMSGCAdaptivePolicyCounters;
|
||||
class CMSTracer;
|
||||
@ -1009,8 +1010,7 @@ class CMSCollector: public CHeapObj<mtGC> {
|
||||
void icms_wait(); // Called at yield points.
|
||||
|
||||
// Adaptive size policy
|
||||
CMSAdaptiveSizePolicy* size_policy();
|
||||
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
|
||||
AdaptiveSizePolicy* size_policy();
|
||||
|
||||
static void print_on_error(outputStream* st);
|
||||
|
||||
@ -1150,9 +1150,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||
|
||||
virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
|
||||
|
||||
// Adaptive size policy
|
||||
CMSAdaptiveSizePolicy* size_policy();
|
||||
|
||||
void set_did_compact(bool v) { _did_compact = v; }
|
||||
|
||||
bool refs_discovery_is_atomic() const { return false; }
|
||||
@ -1346,37 +1343,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||
void rotate_debug_collection_type();
|
||||
};
|
||||
|
||||
class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
|
||||
|
||||
// Return the size policy from the heap's collector
|
||||
// policy casted to CMSAdaptiveSizePolicy*.
|
||||
CMSAdaptiveSizePolicy* cms_size_policy() const;
|
||||
|
||||
// Resize the generation based on the adaptive size
|
||||
// policy.
|
||||
void resize(size_t cur_promo, size_t desired_promo);
|
||||
|
||||
// Return the GC counters from the collector policy
|
||||
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
|
||||
|
||||
virtual void shrink_by(size_t bytes);
|
||||
|
||||
public:
|
||||
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
|
||||
int level, CardTableRS* ct,
|
||||
bool use_adaptive_freelists,
|
||||
FreeBlockDictionary<FreeChunk>::DictionaryChoice
|
||||
dictionaryChoice) :
|
||||
ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
|
||||
use_adaptive_freelists, dictionaryChoice) {}
|
||||
|
||||
virtual const char* short_name() const { return "ASCMS"; }
|
||||
virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
|
||||
|
||||
virtual void update_counters();
|
||||
virtual void update_counters(size_t used);
|
||||
};
|
||||
|
||||
//
|
||||
// Closures of various sorts used by CMS to accomplish its work
|
||||
//
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc_implementation/g1/concurrentMark.inline.hpp"
|
||||
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
@ -39,6 +40,7 @@
|
||||
#include "gc_implementation/shared/gcTimer.hpp"
|
||||
#include "gc_implementation/shared/gcTrace.hpp"
|
||||
#include "gc_implementation/shared/gcTraceTime.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
@ -58,8 +60,8 @@ CMBitMapRO::CMBitMapRO(int shifter) :
|
||||
_bmWordSize = 0;
|
||||
}
|
||||
|
||||
HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
|
||||
HeapWord* limit) const {
|
||||
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
|
||||
const HeapWord* limit) const {
|
||||
// First we must round addr *up* to a possible object boundary.
|
||||
addr = (HeapWord*)align_size_up((intptr_t)addr,
|
||||
HeapWordSize << _shifter);
|
||||
@ -76,8 +78,8 @@ HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
|
||||
return nextAddr;
|
||||
}
|
||||
|
||||
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
|
||||
HeapWord* limit) const {
|
||||
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
|
||||
const HeapWord* limit) const {
|
||||
size_t addrOffset = heapWordToOffset(addr);
|
||||
if (limit == NULL) {
|
||||
limit = _bmStartWord + _bmWordSize;
|
||||
@ -1223,6 +1225,9 @@ public:
|
||||
};
|
||||
|
||||
void ConcurrentMark::scanRootRegions() {
|
||||
// Start of concurrent marking.
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
|
||||
// scan_in_progress() will have been set to true only if there was
|
||||
// at least one root region to scan. So, if it's false, we
|
||||
// should not attempt to do any further work.
|
||||
@ -1271,7 +1276,7 @@ void ConcurrentMark::markFromRoots() {
|
||||
CMConcurrentMarkingTask markingTask(this, cmThread());
|
||||
if (use_parallel_marking_threads()) {
|
||||
_parallel_workers->set_active_workers((int)active_workers);
|
||||
// Don't set _n_par_threads because it affects MT in process_strong_roots()
|
||||
// Don't set _n_par_threads because it affects MT in process_roots()
|
||||
// and the decisions on that MT processing is made elsewhere.
|
||||
assert(_parallel_workers->active_workers() > 0, "Should have been set");
|
||||
_parallel_workers->run_task(&markingTask);
|
||||
@ -2142,23 +2147,29 @@ void ConcurrentMark::cleanup() {
|
||||
// Update the soft reference policy with the new heap occupancy.
|
||||
Universe::update_heap_info_at_gc();
|
||||
|
||||
// We need to make this be a "collection" so any collection pause that
|
||||
// races with it goes around and waits for completeCleanup to finish.
|
||||
g1h->increment_total_collections();
|
||||
|
||||
// We reclaimed old regions so we should calculate the sizes to make
|
||||
// sure we update the old gen/space data.
|
||||
g1h->g1mm()->update_sizes();
|
||||
|
||||
if (VerifyDuringGC) {
|
||||
HandleMark hm; // handle scope
|
||||
Universe::heap()->prepare_for_verify();
|
||||
Universe::verify(VerifyOption_G1UsePrevMarking,
|
||||
" VerifyDuringGC:(after)");
|
||||
}
|
||||
|
||||
g1h->check_bitmaps("Cleanup End");
|
||||
|
||||
g1h->verify_region_sets_optional();
|
||||
|
||||
// We need to make this be a "collection" so any collection pause that
|
||||
// races with it goes around and waits for completeCleanup to finish.
|
||||
g1h->increment_total_collections();
|
||||
|
||||
// Clean out dead classes and update Metaspace sizes.
|
||||
ClassLoaderDataGraph::purge();
|
||||
MetaspaceGC::compute_new_size();
|
||||
|
||||
// We reclaimed old regions so we should calculate the sizes to make
|
||||
// sure we update the old gen/space data.
|
||||
g1h->g1mm()->update_sizes();
|
||||
|
||||
g1h->trace_heap_after_concurrent_cycle();
|
||||
}
|
||||
|
||||
@ -2445,6 +2456,26 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
|
||||
_g1h->set_par_threads(0);
|
||||
}
|
||||
|
||||
void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
|
||||
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
|
||||
}
|
||||
|
||||
// Helper class to get rid of some boilerplate code.
|
||||
class G1RemarkGCTraceTime : public GCTraceTime {
|
||||
static bool doit_and_prepend(bool doit) {
|
||||
if (doit) {
|
||||
gclog_or_tty->put(' ');
|
||||
}
|
||||
return doit;
|
||||
}
|
||||
|
||||
public:
|
||||
G1RemarkGCTraceTime(const char* title, bool doit)
|
||||
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
|
||||
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
|
||||
}
|
||||
};
|
||||
|
||||
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
if (has_overflown()) {
|
||||
// Skip processing the discovered references if we have
|
||||
@ -2557,9 +2588,28 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
return;
|
||||
}
|
||||
|
||||
g1h->unlink_string_and_symbol_table(&g1_is_alive,
|
||||
/* process_strings */ false, // currently strings are always roots
|
||||
/* process_symbols */ true);
|
||||
assert(_markStack.isEmpty(), "Marking should have completed");
|
||||
|
||||
// Unload Klasses, String, Symbols, Code Cache, etc.
|
||||
|
||||
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
|
||||
|
||||
bool purged_classes;
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
|
||||
}
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
|
||||
}
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1StringDedup::unlink(&g1_is_alive);
|
||||
}
|
||||
}
|
||||
|
||||
void ConcurrentMark::swapMarkBitMaps() {
|
||||
@ -2568,6 +2618,57 @@ void ConcurrentMark::swapMarkBitMaps() {
|
||||
_nextMarkBitMap = (CMBitMap*) temp;
|
||||
}
|
||||
|
||||
class CMObjectClosure;
|
||||
|
||||
// Closure for iterating over objects, currently only used for
|
||||
// processing SATB buffers.
|
||||
class CMObjectClosure : public ObjectClosure {
|
||||
private:
|
||||
CMTask* _task;
|
||||
|
||||
public:
|
||||
void do_object(oop obj) {
|
||||
_task->deal_with_reference(obj);
|
||||
}
|
||||
|
||||
CMObjectClosure(CMTask* task) : _task(task) { }
|
||||
};
|
||||
|
||||
class G1RemarkThreadsClosure : public ThreadClosure {
|
||||
CMObjectClosure _cm_obj;
|
||||
G1CMOopClosure _cm_cl;
|
||||
MarkingCodeBlobClosure _code_cl;
|
||||
int _thread_parity;
|
||||
bool _is_par;
|
||||
|
||||
public:
|
||||
G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
|
||||
_cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
|
||||
_thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
|
||||
|
||||
void do_thread(Thread* thread) {
|
||||
if (thread->is_Java_thread()) {
|
||||
if (thread->claim_oops_do(_is_par, _thread_parity)) {
|
||||
JavaThread* jt = (JavaThread*)thread;
|
||||
|
||||
// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
|
||||
// however the oops reachable from nmethods have very complex lifecycles:
|
||||
// * Alive if on the stack of an executing method
|
||||
// * Weakly reachable otherwise
|
||||
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
|
||||
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
|
||||
jt->nmethods_do(&_code_cl);
|
||||
|
||||
jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
|
||||
}
|
||||
} else if (thread->is_VM_thread()) {
|
||||
if (thread->claim_oops_do(_is_par, _thread_parity)) {
|
||||
JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class CMRemarkTask: public AbstractGangTask {
|
||||
private:
|
||||
ConcurrentMark* _cm;
|
||||
@ -2579,6 +2680,14 @@ public:
|
||||
if (worker_id < _cm->active_tasks()) {
|
||||
CMTask* task = _cm->task(worker_id);
|
||||
task->record_start_time();
|
||||
{
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
|
||||
G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
|
||||
Threads::threads_do(&threads_f);
|
||||
}
|
||||
|
||||
do {
|
||||
task->do_marking_step(1000000000.0 /* something very large */,
|
||||
true /* do_termination */,
|
||||
@ -2601,6 +2710,8 @@ void ConcurrentMark::checkpointRootsFinalWork() {
|
||||
HandleMark hm;
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
|
||||
|
||||
g1h->ensure_parsability(false);
|
||||
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
@ -3430,20 +3541,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// Closure for iterating over objects, currently only used for
|
||||
// processing SATB buffers.
|
||||
class CMObjectClosure : public ObjectClosure {
|
||||
private:
|
||||
CMTask* _task;
|
||||
|
||||
public:
|
||||
void do_object(oop obj) {
|
||||
_task->deal_with_reference(obj);
|
||||
}
|
||||
|
||||
CMObjectClosure(CMTask* task) : _task(task) { }
|
||||
};
|
||||
|
||||
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
|
||||
ConcurrentMark* cm,
|
||||
CMTask* task)
|
||||
@ -3908,15 +4005,6 @@ void CMTask::drain_satb_buffers() {
|
||||
}
|
||||
}
|
||||
|
||||
if (!concurrent() && !has_aborted()) {
|
||||
// We should only do this during remark.
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
satb_mq_set.par_iterate_closure_all_threads(_worker_id);
|
||||
} else {
|
||||
satb_mq_set.iterate_closure_all_threads();
|
||||
}
|
||||
}
|
||||
|
||||
_draining_satb_buffers = false;
|
||||
|
||||
assert(has_aborted() ||
|
||||
|
@ -25,6 +25,7 @@
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
|
||||
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSet.hpp"
|
||||
#include "gc_implementation/shared/gcId.hpp"
|
||||
#include "utilities/taskqueue.hpp"
|
||||
@ -86,19 +87,19 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
|
||||
// Return the address corresponding to the next marked bit at or after
|
||||
// "addr", and before "limit", if "limit" is non-NULL. If there is no
|
||||
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
|
||||
HeapWord* getNextMarkedWordAddress(HeapWord* addr,
|
||||
HeapWord* limit = NULL) const;
|
||||
HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
|
||||
const HeapWord* limit = NULL) const;
|
||||
// Return the address corresponding to the next unmarked bit at or after
|
||||
// "addr", and before "limit", if "limit" is non-NULL. If there is no
|
||||
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
|
||||
HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
|
||||
HeapWord* limit = NULL) const;
|
||||
HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
|
||||
const HeapWord* limit = NULL) const;
|
||||
|
||||
// conversion utilities
|
||||
HeapWord* offsetToHeapWord(size_t offset) const {
|
||||
return _bmStartWord + (offset << _shifter);
|
||||
}
|
||||
size_t heapWordToOffset(HeapWord* addr) const {
|
||||
size_t heapWordToOffset(const HeapWord* addr) const {
|
||||
return pointer_delta(addr, _bmStartWord) >> _shifter;
|
||||
}
|
||||
int heapWordDiffToOffsetDiff(size_t diff) const;
|
||||
@ -476,6 +477,7 @@ protected:
|
||||
ForceOverflowSettings _force_overflow_conc;
|
||||
ForceOverflowSettings _force_overflow_stw;
|
||||
|
||||
void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
|
||||
void weakRefsWork(bool clear_all_soft_refs);
|
||||
|
||||
void swapMarkBitMaps();
|
||||
|
@ -26,6 +26,7 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1AllocRegion.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.inline.hpp"
|
||||
|
||||
inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
|
||||
size_t word_size,
|
||||
|
@ -426,7 +426,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
|
||||
q = n;
|
||||
oop obj = oop(q);
|
||||
if (obj->klass_or_null() == NULL) return q;
|
||||
n += obj->size();
|
||||
n += block_size(q);
|
||||
}
|
||||
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
|
||||
// [q, n) is the block that crosses the boundary.
|
||||
|
@ -26,7 +26,8 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.inline.hpp"
|
||||
#include "memory/space.hpp"
|
||||
|
||||
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
|
||||
@ -112,7 +113,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
|
||||
q = n;
|
||||
oop obj = oop(q);
|
||||
if (obj->klass_or_null() == NULL) return q;
|
||||
n += obj->size();
|
||||
n += block_size(q);
|
||||
}
|
||||
assert(q <= n, "wrong order for q and addr");
|
||||
assert(addr < n, "wrong order for addr and n");
|
||||
|
@ -30,23 +30,52 @@
|
||||
|
||||
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
||||
|
||||
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
|
||||
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
|
||||
_top = bottom();
|
||||
}
|
||||
|
||||
void G1CodeRootChunk::reset() {
|
||||
_next = _prev = NULL;
|
||||
_free = NULL;
|
||||
_top = bottom();
|
||||
}
|
||||
|
||||
void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
|
||||
nmethod** cur = bottom();
|
||||
NmethodOrLink* cur = bottom();
|
||||
while (cur != _top) {
|
||||
cl->do_code_blob(*cur);
|
||||
if (is_nmethod(cur)) {
|
||||
cl->do_code_blob(cur->_nmethod);
|
||||
}
|
||||
cur++;
|
||||
}
|
||||
}
|
||||
|
||||
bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
  NmethodOrLink* cur = bottom();

  for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
    if (cur->_nmethod == method) {
      bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;

      if (!result) {
        // Someone else cleared out this entry.
        return false;
      }

      // The method was cleared. Time to link it into the free list.
      NmethodOrLink* prev_free;
      do {
        prev_free = (NmethodOrLink*)_free;
        cur->_link = prev_free;
      } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);

      return true;
    }
  }

  return false;
}
|
||||
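// Illustrative sketch, not part of the patch above: each chunk slot holds
// either a payload pointer or a link into an internal free list, removal
// clears a slot with a single compare-and-swap, and additions reuse freed
// slots before bumping the top pointer. The names, the payload type and the
// use of std::atomic are assumptions made for the example; the real code
// stores nmethod* and uses HotSpot's Atomic::cmpxchg_ptr.

#include <atomic>
#include <cstddef>

// One slot holds either a payload pointer or a link to another free slot.
struct Slot {
  std::atomic<void*> value_or_link;
};

class FixedChunk {
  static const int kEntries = 32;
  Slot               _data[kEntries];
  Slot*              _top;    // first never-used slot
  std::atomic<Slot*> _free;   // head of the free list of cleared slots

  // NULL, or a pointer back into the chunk itself, marks a reusable slot.
  bool is_link_value(const void* v) const {
    const char* p  = static_cast<const char*>(v);
    const char* lo = reinterpret_cast<const char*>(_data);
    const char* hi = reinterpret_cast<const char*>(_data + kEntries);
    return v == NULL || (p >= lo && p < hi);
  }

public:
  FixedChunk() : _top(_data), _free(NULL) {
    for (int i = 0; i < kEntries; i++) {
      _data[i].value_or_link.store(NULL);
    }
  }

  // Additions are assumed to be serialized by the caller, as in the set above;
  // only removal has to cope with concurrent removers.
  bool add(void* payload) {
    if (Slot* reuse = _free.load()) {
      _free.store(static_cast<Slot*>(reuse->value_or_link.load()));
      reuse->value_or_link.store(payload);
      return true;
    }
    if (_top == _data + kEntries) {
      return false;               // chunk exhausted, caller allocates a new one
    }
    _top->value_or_link.store(payload);
    _top++;
    return true;
  }

  // Lock-free removal: clear the slot with one CAS, then push the cleared slot
  // onto the free list with a second CAS loop so a later add() can reuse it.
  bool remove_lock_free(void* payload) {
    for (Slot* cur = _data; cur != _top; cur++) {
      if (cur->value_or_link.load() != payload) {
        continue;
      }
      void* expected = payload;
      if (!cur->value_or_link.compare_exchange_strong(expected, (void*)NULL)) {
        return false;             // another thread removed it first
      }
      Slot* head = _free.load();
      do {
        cur->value_or_link.store(head);
      } while (!_free.compare_exchange_weak(head, cur));
      return true;
    }
    return false;
  }

  // Iteration skips slots that currently hold a link rather than a payload.
  template <typename Visitor>
  void visit_payloads(Visitor visit) {
    for (Slot* cur = _data; cur != _top; cur++) {
      void* v = cur->value_or_link.load();
      if (!is_link_value(v)) {
        visit(v);
      }
    }
  }
};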
|
||||
G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
|
||||
_free_list.initialize();
|
||||
_free_list.set_size(G1CodeRootChunk::word_size());
|
||||
@ -140,34 +169,43 @@ G1CodeRootSet::~G1CodeRootSet() {
|
||||
|
||||
void G1CodeRootSet::add(nmethod* method) {
|
||||
if (!contains(method)) {
|
||||
// Try to add the nmethod. If there is not enough space, get a new chunk.
|
||||
if (_list.head() == NULL || _list.head()->is_full()) {
|
||||
G1CodeRootChunk* cur = new_chunk();
|
||||
// Find the first chunk that isn't full.
|
||||
G1CodeRootChunk* cur = _list.head();
|
||||
while (cur != NULL) {
|
||||
if (!cur->is_full()) {
|
||||
break;
|
||||
}
|
||||
cur = cur->next();
|
||||
}
|
||||
|
||||
// All chunks are full, get a new chunk.
|
||||
if (cur == NULL) {
|
||||
cur = new_chunk();
|
||||
_list.return_chunk_at_head(cur);
|
||||
}
|
||||
bool result = _list.head()->add(method);
|
||||
|
||||
// Add the nmethod.
|
||||
bool result = cur->add(method);
|
||||
|
||||
guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
|
||||
|
||||
_length++;
|
||||
}
|
||||
}
|
||||
|
||||
void G1CodeRootSet::remove(nmethod* method) {
|
||||
void G1CodeRootSet::remove_lock_free(nmethod* method) {
|
||||
G1CodeRootChunk* found = find(method);
|
||||
if (found != NULL) {
|
||||
bool result = found->remove(method);
|
||||
guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
|
||||
// eventually free completely emptied chunk
|
||||
if (found->is_empty()) {
|
||||
_list.remove_chunk(found);
|
||||
free(found);
|
||||
bool result = found->remove_lock_free(method);
|
||||
if (result) {
|
||||
Atomic::dec_ptr((volatile intptr_t*)&_length);
|
||||
}
|
||||
_length--;
|
||||
}
|
||||
assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
|
||||
}
|
||||
|
||||
nmethod* G1CodeRootSet::pop() {
|
||||
do {
|
||||
while (true) {
|
||||
G1CodeRootChunk* cur = _list.head();
|
||||
if (cur == NULL) {
|
||||
assert(_length == 0, "when there are no chunks, there should be no elements");
|
||||
@ -180,7 +218,7 @@ nmethod* G1CodeRootSet::pop() {
|
||||
} else {
|
||||
free(_list.get_chunk_at_head());
|
||||
}
|
||||
} while (true);
|
||||
}
|
||||
}
|
||||
|
||||
G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
|
||||
|
@ -31,6 +31,14 @@
|
||||
|
||||
class CodeBlobClosure;

// The elements of a G1CodeRootChunk are either:
//  1) nmethod pointers
//  2) nodes in an internally chained free list
typedef union {
  nmethod* _nmethod;
  void*    _link;
} NmethodOrLink;

class G1CodeRootChunk : public CHeapObj<mtGC> {
|
||||
private:
|
||||
static const int NUM_ENTRIES = 32;
|
||||
@ -38,16 +46,28 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
|
||||
G1CodeRootChunk* _next;
|
||||
G1CodeRootChunk* _prev;
|
||||
|
||||
nmethod** _top;
|
||||
NmethodOrLink* _top;
|
||||
// First free position within the chunk.
|
||||
volatile NmethodOrLink* _free;
|
||||
|
||||
nmethod* _data[NUM_ENTRIES];
|
||||
NmethodOrLink _data[NUM_ENTRIES];
|
||||
|
||||
nmethod** bottom() const {
|
||||
return (nmethod**) &(_data[0]);
|
||||
NmethodOrLink* bottom() const {
|
||||
return (NmethodOrLink*) &(_data[0]);
|
||||
}
|
||||
|
||||
nmethod** end() const {
|
||||
return (nmethod**) &(_data[NUM_ENTRIES]);
|
||||
NmethodOrLink* end() const {
|
||||
return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
|
||||
}
|
||||
|
||||
  bool is_link(NmethodOrLink* nmethod_or_link) {
    return nmethod_or_link->_link == NULL ||
           (bottom() <= nmethod_or_link->_link
            && nmethod_or_link->_link < end());
  }

  bool is_nmethod(NmethodOrLink* nmethod_or_link) {
    return !is_link(nmethod_or_link);
  }

public:
|
||||
@ -85,46 +105,55 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
|
||||
}
|
||||
|
||||
bool is_full() const {
|
||||
return _top == (nmethod**)end();
|
||||
return _top == end() && _free == NULL;
|
||||
}
|
||||
|
||||
bool contains(nmethod* method) {
|
||||
nmethod** cur = bottom();
|
||||
NmethodOrLink* cur = bottom();
|
||||
while (cur != _top) {
|
||||
if (*cur == method) return true;
|
||||
if (cur->_nmethod == method) return true;
|
||||
cur++;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool add(nmethod* method) {
|
||||
if (is_full()) return false;
|
||||
*_top = method;
|
||||
_top++;
|
||||
if (is_full()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_free != NULL) {
|
||||
// Take from internally chained free list
|
||||
NmethodOrLink* first_free = (NmethodOrLink*)_free;
|
||||
_free = (NmethodOrLink*)_free->_link;
|
||||
first_free->_nmethod = method;
|
||||
} else {
|
||||
// Take from top.
|
||||
_top->_nmethod = method;
|
||||
_top++;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool remove(nmethod* method) {
|
||||
nmethod** cur = bottom();
|
||||
while (cur != _top) {
|
||||
if (*cur == method) {
|
||||
memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
|
||||
_top--;
|
||||
return true;
|
||||
}
|
||||
cur++;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
bool remove_lock_free(nmethod* method);
|
||||
|
||||
void nmethods_do(CodeBlobClosure* blk);
|
||||
|
||||
nmethod* pop() {
|
||||
if (is_empty()) {
|
||||
return NULL;
|
||||
if (_free != NULL) {
|
||||
// Kill the free list.
|
||||
_free = NULL;
|
||||
}
|
||||
_top--;
|
||||
return *_top;
|
||||
|
||||
while (!is_empty()) {
|
||||
_top--;
|
||||
if (is_nmethod(_top)) {
|
||||
return _top->_nmethod;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
};
|
||||
|
||||
@ -193,7 +222,7 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
|
||||
// method is likely to be repeatedly called with the same nmethod.
|
||||
void add(nmethod* method);
|
||||
|
||||
void remove(nmethod* method);
|
||||
void remove_lock_free(nmethod* method);
|
||||
nmethod* pop();
|
||||
|
||||
bool contains(nmethod* method);
|
||||
|
File diff suppressed because it is too large
@ -31,7 +31,6 @@
|
||||
#include "gc_implementation/g1/g1BiasedArray.hpp"
|
||||
#include "gc_implementation/g1/g1HRPrinter.hpp"
|
||||
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/g1YCTypes.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.hpp"
|
||||
@ -211,6 +210,7 @@ class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
|
||||
class RefineCardTableEntryClosure;
|
||||
|
||||
class G1CollectedHeap : public SharedHeap {
|
||||
friend class VM_CollectForMetadataAllocation;
|
||||
friend class VM_G1CollectForAllocation;
|
||||
friend class VM_G1CollectFull;
|
||||
friend class VM_G1IncCollectionPause;
|
||||
@ -220,7 +220,7 @@ class G1CollectedHeap : public SharedHeap {
|
||||
friend class OldGCAllocRegion;
|
||||
|
||||
// Closures used in implementation.
|
||||
template <G1Barrier barrier, bool do_mark_object>
|
||||
template <G1Barrier barrier, G1Mark do_mark_object>
|
||||
friend class G1ParCopyClosure;
|
||||
friend class G1IsAliveClosure;
|
||||
friend class G1EvacuateFollowersClosure;
|
||||
@ -347,6 +347,9 @@ private:
|
||||
// It initializes the GC alloc regions at the start of a GC.
|
||||
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
|
||||
|
||||
// Set up the retained old gc alloc region as the current old gc alloc region.
|
||||
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
|
||||
|
||||
// It releases the GC alloc regions at the end of a GC.
|
||||
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
|
||||
|
||||
@ -828,12 +831,13 @@ protected:
|
||||
// param is for use with parallel roots processing, and should be
|
||||
// the "i" of the calling parallel worker thread's work(i) function.
|
||||
// In the sequential case this param will be ignored.
|
||||
void g1_process_strong_roots(bool is_scavenging,
|
||||
ScanningOption so,
|
||||
OopClosure* scan_non_heap_roots,
|
||||
OopsInHeapRegionClosure* scan_rs,
|
||||
G1KlassScanClosure* scan_klasses,
|
||||
uint worker_i);
|
||||
void g1_process_roots(OopClosure* scan_non_heap_roots,
|
||||
OopClosure* scan_non_heap_weak_roots,
|
||||
OopsInHeapRegionClosure* scan_rs,
|
||||
CLDClosure* scan_strong_clds,
|
||||
CLDClosure* scan_weak_clds,
|
||||
CodeBlobClosure* scan_strong_code,
|
||||
uint worker_i);
|
||||
|
||||
// Notifies all the necessary spaces that the committed space has
|
||||
// been updated (either expanded or shrunk). It should be called
|
||||
@ -1026,7 +1030,7 @@ protected:
|
||||
// of G1CollectedHeap::_gc_time_stamp.
|
||||
unsigned int* _worker_cset_start_region_time_stamp;
|
||||
|
||||
enum G1H_process_strong_roots_tasks {
|
||||
enum G1H_process_roots_tasks {
|
||||
G1H_PS_filter_satb_buffers,
|
||||
G1H_PS_refProcessor_oops_do,
|
||||
// Leave this one last.
|
||||
@ -1608,10 +1612,6 @@ public:
|
||||
// Free up superfluous code root memory.
|
||||
void purge_code_root_memory();
|
||||
|
||||
// During an initial mark pause, mark all the code roots that
|
||||
// point into regions *not* in the collection set.
|
||||
void mark_strong_code_roots(uint worker_id);
|
||||
|
||||
// Rebuild the strong code root lists for each region
|
||||
// after a full GC.
|
||||
void rebuild_strong_code_roots();
|
||||
@ -1620,6 +1620,9 @@ public:
|
||||
// in symbol table, possibly in parallel.
|
||||
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
|
||||
|
||||
// Parallel phase of unloading/cleaning after G1 concurrent mark.
|
||||
void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
|
||||
|
||||
// Redirty logged cards in the refinement queue.
|
||||
void redirty_logged_cards();
|
||||
// Verification
|
||||
@ -1715,256 +1718,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class G1ParScanThreadState : public StackObj {
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
RefToScanQueue* _refs;
|
||||
DirtyCardQueue _dcq;
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
G1RemSet* _g1_rem;
|
||||
|
||||
G1ParGCAllocBuffer _surviving_alloc_buffer;
|
||||
G1ParGCAllocBuffer _tenured_alloc_buffer;
|
||||
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
|
||||
ageTable _age_table;
|
||||
|
||||
G1ParScanClosure _scanner;
|
||||
|
||||
size_t _alloc_buffer_waste;
|
||||
size_t _undo_waste;
|
||||
|
||||
OopsInHeapRegionClosure* _evac_failure_cl;
|
||||
|
||||
int _hash_seed;
|
||||
uint _queue_num;
|
||||
|
||||
size_t _term_attempts;
|
||||
|
||||
double _start;
|
||||
double _start_strong_roots;
|
||||
double _strong_roots_time;
|
||||
double _start_term;
|
||||
double _term_time;
|
||||
|
||||
// Map from young-age-index (0 == not young, 1 is youngest) to
|
||||
// surviving words. base is what we get back from the malloc call
|
||||
size_t* _surviving_young_words_base;
|
||||
// this points into the array, as we use the first few entries for padding
|
||||
size_t* _surviving_young_words;
|
||||
|
||||
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
|
||||
|
||||
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
||||
|
||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||
|
||||
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
||||
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
|
||||
|
||||
template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
|
||||
|
||||
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
|
||||
// If the new value of the field points to the same region or
|
||||
// is the to-space, we don't need to include it in the Rset updates.
|
||||
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
|
||||
size_t card_index = ctbs()->index_for(p);
|
||||
// If the card hasn't been added to the buffer, do it.
|
||||
if (ctbs()->mark_card_deferred(card_index)) {
|
||||
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
|
||||
|
||||
~G1ParScanThreadState() {
|
||||
retire_alloc_buffers();
|
||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
|
||||
}
|
||||
|
||||
RefToScanQueue* refs() { return _refs; }
|
||||
ageTable* age_table() { return &_age_table; }
|
||||
|
||||
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||
return _alloc_buffers[purpose];
|
||||
}
|
||||
|
||||
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
|
||||
size_t undo_waste() const { return _undo_waste; }
|
||||
|
||||
#ifdef ASSERT
|
||||
bool verify_ref(narrowOop* ref) const;
|
||||
bool verify_ref(oop* ref) const;
|
||||
bool verify_task(StarTask ref) const;
|
||||
#endif // ASSERT
|
||||
|
||||
template <class T> void push_on_queue(T* ref) {
|
||||
assert(verify_ref(ref), "sanity");
|
||||
refs()->push(ref);
|
||||
}
|
||||
|
||||
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
|
||||
|
||||
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = NULL;
|
||||
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
|
||||
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
|
||||
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
||||
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
||||
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
|
||||
|
||||
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
|
||||
if (buf == NULL) return NULL; // Let caller handle allocation failure.
|
||||
// Otherwise.
|
||||
alloc_buf->set_word_size(gclab_word_size);
|
||||
alloc_buf->set_buf(buf);
|
||||
|
||||
obj = alloc_buf->allocate(word_sz);
|
||||
assert(obj != NULL, "buffer was definitely big enough...");
|
||||
} else {
|
||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
|
||||
if (obj != NULL) return obj;
|
||||
return allocate_slow(purpose, word_sz);
|
||||
}
|
||||
|
||||
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
|
||||
if (alloc_buffer(purpose)->contains(obj)) {
|
||||
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
|
||||
"should contain whole object");
|
||||
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
|
||||
} else {
|
||||
CollectedHeap::fill_with_object(obj, word_sz);
|
||||
add_to_undo_waste(word_sz);
|
||||
}
|
||||
}
|
||||
|
||||
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
|
||||
_evac_failure_cl = evac_failure_cl;
|
||||
}
|
||||
OopsInHeapRegionClosure* evac_failure_closure() {
|
||||
return _evac_failure_cl;
|
||||
}
|
||||
|
||||
int* hash_seed() { return &_hash_seed; }
|
||||
uint queue_num() { return _queue_num; }
|
||||
|
||||
size_t term_attempts() const { return _term_attempts; }
|
||||
void note_term_attempt() { _term_attempts++; }
|
||||
|
||||
void start_strong_roots() {
|
||||
_start_strong_roots = os::elapsedTime();
|
||||
}
|
||||
void end_strong_roots() {
|
||||
_strong_roots_time += (os::elapsedTime() - _start_strong_roots);
|
||||
}
|
||||
double strong_roots_time() const { return _strong_roots_time; }
|
||||
|
||||
void start_term_time() {
|
||||
note_term_attempt();
|
||||
_start_term = os::elapsedTime();
|
||||
}
|
||||
void end_term_time() {
|
||||
_term_time += (os::elapsedTime() - _start_term);
|
||||
}
|
||||
double term_time() const { return _term_time; }
|
||||
|
||||
double elapsed_time() const {
|
||||
return os::elapsedTime() - _start;
|
||||
}
|
||||
|
||||
static void
|
||||
print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
|
||||
void
|
||||
print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
|
||||
|
||||
size_t* surviving_young_words() {
|
||||
// We add on to hide entry 0 which accumulates surviving words for
|
||||
// age -1 regions (i.e. non-young ones)
|
||||
return _surviving_young_words;
|
||||
}
|
||||
|
||||
private:
|
||||
void retire_alloc_buffers() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
size_t waste = _alloc_buffers[ap]->words_remaining();
|
||||
add_to_alloc_buffer_waste(waste);
|
||||
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
|
||||
true /* end_of_gc */,
|
||||
false /* retain */);
|
||||
}
|
||||
}
|
||||
|
||||
#define G1_PARTIAL_ARRAY_MASK 0x2
|
||||
|
||||
inline bool has_partial_array_mask(oop* ref) const {
|
||||
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
|
||||
}
|
||||
|
||||
// We never encode partial array oops as narrowOop*, so return false immediately.
|
||||
// This allows the compiler to create optimized code when popping references from
|
||||
// the work queue.
|
||||
inline bool has_partial_array_mask(narrowOop* ref) const {
|
||||
assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
|
||||
// We always encode partial arrays as regular oop, to allow the
|
||||
// specialization for has_partial_array_mask() for narrowOops above.
|
||||
// This means that unintentional use of this method with narrowOops is caught
|
||||
// by the compiler.
|
||||
inline oop* set_partial_array_mask(oop obj) const {
|
||||
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
|
||||
return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
|
||||
}
|
||||
|
||||
inline oop clear_partial_array_mask(oop* ref) const {
|
||||
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
|
||||
}
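// Illustrative sketch of the tagging trick used by the helpers above: work
// items are plain pointers, and a spare low bit (free because objects are at
// least 8-byte aligned) distinguishes "scan this reference" from "continue
// chunking this object array". The constant and helper names are made up here.

#include <cassert>
#include <stdint.h>

static const uintptr_t kPartialArrayMask = 0x2;

inline bool has_partial_array_mask(void* task) {
  return ((uintptr_t)task & kPartialArrayMask) == kPartialArrayMask;
}

inline void* set_partial_array_mask(void* obj) {
  assert(((uintptr_t)obj & kPartialArrayMask) == 0 && "low bits must be free");
  return (void*)((uintptr_t)obj | kPartialArrayMask);
}

inline void* clear_partial_array_mask(void* task) {
  return (void*)((uintptr_t)task & ~kPartialArrayMask);
}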
|
||||
|
||||
inline void do_oop_partial_array(oop* p);
|
||||
|
||||
// This method is applied to the fields of the objects that have just been copied.
|
||||
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
|
||||
assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
|
||||
"Reference should not be NULL here as such are never pushed to the task queue.");
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
|
||||
// Although we never intentionally push references outside of the collection
|
||||
// set, due to (benign) races in the claim mechanism during RSet scanning more
|
||||
// than one thread might claim the same card. So the same card may be
|
||||
// processed multiple times. So redo this check.
|
||||
if (_g1h->in_cset_fast_test(obj)) {
|
||||
oop forwardee;
|
||||
if (obj->is_forwarded()) {
|
||||
forwardee = obj->forwardee();
|
||||
} else {
|
||||
forwardee = copy_to_survivor_space(obj);
|
||||
}
|
||||
assert(forwardee != NULL, "forwardee should not be NULL");
|
||||
oopDesc::encode_store_heap_oop(p, forwardee);
|
||||
}
|
||||
|
||||
assert(obj != NULL, "Must be");
|
||||
update_rs(from, p, queue_num());
|
||||
}
|
||||
public:
|
||||
|
||||
oop copy_to_survivor_space(oop const obj);
|
||||
|
||||
template <class T> inline void deal_with_reference(T* ref_to_scan);
|
||||
|
||||
inline void deal_with_reference(StarTask ref);
|
||||
|
||||
public:
|
||||
void trim_queue();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
|
||||
|
@ -29,7 +29,6 @@
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
@ -289,89 +288,4 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
|
||||
return is_obj_ill(obj, heap_region_containing(obj));
|
||||
}
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
|
||||
if (!from->is_survivor()) {
|
||||
_g1_rem->par_write_ref(from, p, tid);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
|
||||
if (G1DeferredRSUpdate) {
|
||||
deferred_rs_update(from, p, tid);
|
||||
} else {
|
||||
immediate_rs_update(from, p, tid);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
|
||||
assert(has_partial_array_mask(p), "invariant");
|
||||
oop from_obj = clear_partial_array_mask(p);
|
||||
|
||||
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
|
||||
assert(from_obj->is_objArray(), "must be obj array");
|
||||
objArrayOop from_obj_array = objArrayOop(from_obj);
|
||||
// The from-space object contains the real length.
|
||||
int length = from_obj_array->length();
|
||||
|
||||
assert(from_obj->is_forwarded(), "must be forwarded");
|
||||
oop to_obj = from_obj->forwardee();
|
||||
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
|
||||
objArrayOop to_obj_array = objArrayOop(to_obj);
|
||||
// We keep track of the next start index in the length field of the
|
||||
// to-space object.
|
||||
int next_index = to_obj_array->length();
|
||||
assert(0 <= next_index && next_index < length,
|
||||
err_msg("invariant, next index: %d, length: %d", next_index, length));
|
||||
|
||||
int start = next_index;
|
||||
int end = length;
|
||||
int remainder = end - start;
|
||||
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
|
||||
if (remainder > 2 * ParGCArrayScanChunk) {
|
||||
end = start + ParGCArrayScanChunk;
|
||||
to_obj_array->set_length(end);
|
||||
// Push the remainder before we process the range in case another
|
||||
// worker has run out of things to do and can steal it.
|
||||
oop* from_obj_p = set_partial_array_mask(from_obj);
|
||||
push_on_queue(from_obj_p);
|
||||
} else {
|
||||
assert(length == end, "sanity");
|
||||
// We'll process the final range for this object. Restore the length
|
||||
// so that the heap remains parsable in case of evacuation failure.
|
||||
to_obj_array->set_length(end);
|
||||
}
|
||||
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
|
||||
// Process indexes [start,end). It will also process the header
|
||||
// along with the first chunk (i.e., the chunk with start == 0).
|
||||
// Note that at this point the length field of to_obj_array is not
|
||||
// correct given that we are using it to keep track of the next
|
||||
// start index. oop_iterate_range() (thankfully!) ignores the length
|
||||
// field and only relies on the start / end parameters. It does
|
||||
// however return the size of the object which will be incorrect. So
|
||||
// we have to ignore it even if we wanted to use it.
|
||||
to_obj_array->oop_iterate_range(&_scanner, start, end);
|
||||
}
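// Illustrative sketch of the chunking strategy implemented above: a large
// array is scanned a fixed chunk at a time, and the remaining range is pushed
// back onto the work queue *before* the current chunk is processed, so an idle
// worker can steal it. The queue type and the chunk size are placeholders.

#include <deque>
#include <utility>

typedef std::pair<int, int> Range;   // [start, end) still to be scanned

void scan_array_in_chunks(int length, std::deque<Range>& queue,
                          void (*scan_element)(int index), int chunk = 50) {
  queue.push_back(Range(0, length));
  while (!queue.empty()) {
    Range r = queue.back();
    queue.pop_back();
    int start = r.first;
    int end   = r.second;
    if (end - start > 2 * chunk) {
      // Keep one chunk for ourselves, publish the remainder for stealing first.
      queue.push_back(Range(start + chunk, end));
      end = start + chunk;
    }
    for (int i = start; i < end; i++) {
      scan_element(i);
    }
  }
}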
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
|
||||
if (!has_partial_array_mask(ref_to_scan)) {
|
||||
// Note: we can use "raw" versions of "region_containing" because
|
||||
// "obj_to_scan" is definitely in the heap, and is not in a
|
||||
// humongous region.
|
||||
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
|
||||
do_oop_evac(ref_to_scan, r);
|
||||
} else {
|
||||
do_oop_partial_array((oop*)ref_to_scan);
|
||||
}
|
||||
}
|
||||
|
||||
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
|
||||
assert(verify_task(ref), "sanity");
|
||||
if (ref.is_narrow()) {
|
||||
deal_with_reference((narrowOop*)ref);
|
||||
} else {
|
||||
deal_with_reference((oop*)ref);
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
|
||||
|
@ -71,6 +71,9 @@ private:
|
||||
bool _during_initial_mark;
|
||||
bool _during_conc_mark;
|
||||
uint _worker_id;
|
||||
HeapWord* _end_of_last_gap;
|
||||
HeapWord* _last_gap_threshold;
|
||||
HeapWord* _last_obj_threshold;
|
||||
|
||||
public:
|
||||
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
|
||||
@ -83,7 +86,10 @@ public:
|
||||
_update_rset_cl(update_rset_cl),
|
||||
_during_initial_mark(during_initial_mark),
|
||||
_during_conc_mark(during_conc_mark),
|
||||
_worker_id(worker_id) { }
|
||||
_worker_id(worker_id),
|
||||
_end_of_last_gap(hr->bottom()),
|
||||
_last_gap_threshold(hr->bottom()),
|
||||
_last_obj_threshold(hr->bottom()) { }
|
||||
|
||||
size_t marked_bytes() { return _marked_bytes; }
|
||||
|
||||
@ -107,7 +113,12 @@ public:
|
||||
HeapWord* obj_addr = (HeapWord*) obj;
|
||||
assert(_hr->is_in(obj_addr), "sanity");
|
||||
size_t obj_size = obj->size();
|
||||
_hr->update_bot_for_object(obj_addr, obj_size);
|
||||
HeapWord* obj_end = obj_addr + obj_size;
|
||||
|
||||
if (_end_of_last_gap != obj_addr) {
|
||||
// there was a gap before obj_addr
|
||||
_last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
|
||||
}
|
||||
|
||||
if (obj->is_forwarded() && obj->forwardee() == obj) {
|
||||
// The object failed to move.
|
||||
@ -115,7 +126,9 @@ public:
|
||||
// We consider all objects that we find self-forwarded to be
|
||||
// live. What we'll do is that we'll update the prev marking
|
||||
// info so that they are all under PTAMS and explicitly marked.
|
||||
_cm->markPrev(obj);
|
||||
if (!_cm->isPrevMarked(obj)) {
|
||||
_cm->markPrev(obj);
|
||||
}
|
||||
if (_during_initial_mark) {
|
||||
// For the next marking info we'll only mark the
|
||||
// self-forwarded objects explicitly if we are during
|
||||
@ -145,13 +158,18 @@ public:
|
||||
// remembered set entries missing given that we skipped cards on
|
||||
// the collection set. So, we'll recreate such entries now.
|
||||
obj->oop_iterate(_update_rset_cl);
|
||||
assert(_cm->isPrevMarked(obj), "Should be marked!");
|
||||
} else {
|
||||
|
||||
// The object has been either evacuated or is dead. Fill it with a
|
||||
// dummy object.
|
||||
MemRegion mr((HeapWord*) obj, obj_size);
|
||||
MemRegion mr(obj_addr, obj_size);
|
||||
CollectedHeap::fill_with_object(mr);
|
||||
|
||||
// must nuke all dead objects which we skipped when iterating over the region
|
||||
_cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
|
||||
}
|
||||
_end_of_last_gap = obj_end;
|
||||
_last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
|
||||
}
|
||||
};
|
||||
|
||||
@ -182,13 +200,6 @@ public:
|
||||
during_conc_mark,
|
||||
_worker_id);
|
||||
|
||||
MemRegion mr(hr->bottom(), hr->end());
|
||||
// We'll recreate the prev marking info so we'll first clear
|
||||
// the prev bitmap range for this region. We never mark any
|
||||
// CSet objects explicitly so the next bitmap range should be
|
||||
// cleared anyway.
|
||||
_cm->clearRangePrevBitmap(mr);
|
||||
|
||||
hr->note_self_forwarding_removal_start(during_initial_mark,
|
||||
during_conc_mark);
|
||||
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
|
||||
|
@ -167,7 +167,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
|
||||
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
|
||||
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
|
||||
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
|
||||
@ -194,7 +193,6 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
|
||||
_last_update_rs_processed_buffers.reset();
|
||||
_last_scan_rs_times_ms.reset();
|
||||
_last_strong_code_root_scan_times_ms.reset();
|
||||
_last_strong_code_root_mark_times_ms.reset();
|
||||
_last_obj_copy_times_ms.reset();
|
||||
_last_termination_times_ms.reset();
|
||||
_last_termination_attempts.reset();
|
||||
@ -215,7 +213,6 @@ void G1GCPhaseTimes::note_gc_end() {
|
||||
_last_update_rs_processed_buffers.verify();
|
||||
_last_scan_rs_times_ms.verify();
|
||||
_last_strong_code_root_scan_times_ms.verify();
|
||||
_last_strong_code_root_mark_times_ms.verify();
|
||||
_last_obj_copy_times_ms.verify();
|
||||
_last_termination_times_ms.verify();
|
||||
_last_termination_attempts.verify();
|
||||
@ -230,7 +227,6 @@ void G1GCPhaseTimes::note_gc_end() {
|
||||
_last_update_rs_times_ms.get(i) +
|
||||
_last_scan_rs_times_ms.get(i) +
|
||||
_last_strong_code_root_scan_times_ms.get(i) +
|
||||
_last_strong_code_root_mark_times_ms.get(i) +
|
||||
_last_obj_copy_times_ms.get(i) +
|
||||
_last_termination_times_ms.get(i);
|
||||
|
||||
@ -302,9 +298,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
|
||||
if (_last_satb_filtering_times_ms.sum() > 0.0) {
|
||||
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
|
||||
}
|
||||
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
|
||||
_last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
|
||||
}
|
||||
_last_update_rs_times_ms.print(2, "Update RS (ms)");
|
||||
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
|
||||
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
|
||||
@ -322,9 +315,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
|
||||
if (_last_satb_filtering_times_ms.sum() > 0.0) {
|
||||
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
|
||||
}
|
||||
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
|
||||
_last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
|
||||
}
|
||||
_last_update_rs_times_ms.print(1, "Update RS (ms)");
|
||||
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
|
||||
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");
|
||||
|
@ -120,7 +120,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
WorkerDataArray<int> _last_update_rs_processed_buffers;
|
||||
WorkerDataArray<double> _last_scan_rs_times_ms;
|
||||
WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
|
||||
WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
|
||||
WorkerDataArray<double> _last_obj_copy_times_ms;
|
||||
WorkerDataArray<double> _last_termination_times_ms;
|
||||
WorkerDataArray<size_t> _last_termination_attempts;
|
||||
@ -199,10 +198,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
_last_strong_code_root_scan_times_ms.set(worker_i, ms);
|
||||
}
|
||||
|
||||
void record_strong_code_root_mark_time(uint worker_i, double ms) {
|
||||
_last_strong_code_root_mark_times_ms.set(worker_i, ms);
|
||||
}
|
||||
|
||||
void record_obj_copy_time(uint worker_i, double ms) {
|
||||
_last_obj_copy_times_ms.set(worker_i, ms);
|
||||
}
|
||||
@ -369,10 +364,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
return _last_strong_code_root_scan_times_ms.average();
|
||||
}
|
||||
|
||||
double average_last_strong_code_root_mark_time(){
|
||||
return _last_strong_code_root_mark_times_ms.average();
|
||||
}
|
||||
|
||||
double average_last_obj_copy_time() {
|
||||
return _last_obj_copy_times_ms.average();
|
||||
}
|
||||
|
@ -129,13 +129,15 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
|
||||
// Need cleared claim bits for the strong roots processing
|
||||
// Need cleared claim bits for the roots processing
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
|
||||
sh->process_strong_roots(true, // activate StrongRootsScope
|
||||
SharedHeap::SO_SystemClasses,
|
||||
MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
|
||||
sh->process_strong_roots(true, // activate StrongRootsScope
|
||||
SharedHeap::SO_None,
|
||||
&GenMarkSweep::follow_root_closure,
|
||||
&GenMarkSweep::follow_klass_closure);
|
||||
&GenMarkSweep::follow_cld_closure,
|
||||
&follow_code_closure);
|
||||
|
||||
// Process reference objects found during marking
|
||||
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
|
||||
@ -304,13 +306,15 @@ void G1MarkSweep::mark_sweep_phase3() {
|
||||
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
|
||||
// Need cleared claim bits for the strong roots processing
|
||||
// Need cleared claim bits for the roots processing
|
||||
ClassLoaderDataGraph::clear_claimed_marks();
|
||||
|
||||
sh->process_strong_roots(true, // activate StrongRootsScope
|
||||
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
|
||||
&GenMarkSweep::adjust_pointer_closure,
|
||||
&GenMarkSweep::adjust_klass_closure);
|
||||
CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
|
||||
sh->process_all_roots(true, // activate StrongRootsScope
|
||||
SharedHeap::SO_AllCodeCache,
|
||||
&GenMarkSweep::adjust_pointer_closure,
|
||||
&GenMarkSweep::adjust_cld_closure,
|
||||
&adjust_code_closure);
|
||||
|
||||
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
|
||||
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
|
||||
|
@ -25,7 +25,28 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
|
||||
|
||||
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
|
||||
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
|
||||
_cm(_g1->concurrent_mark()) {}
|
||||
|
||||
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
|
||||
_g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
|
||||
|
||||
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
|
||||
_g1(g1), _par_scan_state(NULL),
|
||||
_worker_id(UINT_MAX) {
|
||||
set_par_scan_thread_state(par_scan_state);
|
||||
}
|
||||
|
||||
void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
|
||||
assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
|
||||
assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
|
||||
|
||||
_par_scan_state = par_scan_state;
|
||||
_worker_id = par_scan_state->queue_num();
|
||||
|
||||
assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
|
||||
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
|
||||
}
|
||||
|
@ -25,6 +25,8 @@
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
|
||||
class HeapRegion;
|
||||
class G1CollectedHeap;
|
||||
class G1RemSet;
|
||||
@ -51,8 +53,13 @@ protected:
|
||||
G1ParScanThreadState* _par_scan_state;
|
||||
uint _worker_id;
|
||||
public:
|
||||
// Initializes the instance, leaving _par_scan_state uninitialized. Must be done
|
||||
// later using the set_par_scan_thread_state() method.
|
||||
G1ParClosureSuper(G1CollectedHeap* g1);
|
||||
G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
|
||||
bool apply_to_weak_ref_discovered_field() { return true; }
|
||||
|
||||
void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
|
||||
};
|
||||
|
||||
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
|
||||
@ -68,9 +75,8 @@ public:
|
||||
|
||||
class G1ParScanClosure : public G1ParClosureSuper {
|
||||
public:
|
||||
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
|
||||
G1ParClosureSuper(g1, par_scan_state)
|
||||
{
|
||||
G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) :
|
||||
G1ParClosureSuper(g1) {
|
||||
assert(_ref_processor == NULL, "sanity");
|
||||
_ref_processor = rp;
|
||||
}
|
||||
@ -102,7 +108,7 @@ protected:
|
||||
template <class T> void do_klass_barrier(T* p, oop new_obj);
|
||||
};
|
||||
|
||||
template <G1Barrier barrier, bool do_mark_object>
|
||||
template <G1Barrier barrier, G1Mark do_mark_object>
|
||||
class G1ParCopyClosure : public G1ParCopyHelper {
|
||||
private:
|
||||
template <class T> void do_oop_work(T* p);
|
||||
@ -117,19 +123,19 @@ public:
|
||||
template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
G1CollectedHeap* g1() { return _g1; };
|
||||
G1ParScanThreadState* pss() { return _par_scan_state; }
|
||||
ReferenceProcessor* rp() { return _ref_processor; };
|
||||
};
|
||||
|
||||
typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
|
||||
|
||||
|
||||
typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
|
||||
|
||||
typedef G1ParCopyClosure<G1BarrierNone, G1MarkNone> G1ParScanExtRootClosure;
|
||||
typedef G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> G1ParScanAndMarkExtRootClosure;
|
||||
typedef G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
|
||||
// We use a separate closure to handle references during evacuation
|
||||
// failure processing.
|
||||
|
||||
typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
|
||||
typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
|
||||
|
||||
class FilterIntoCSClosure: public ExtendedOopClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
@ -160,10 +166,11 @@ public:
|
||||
};
|
||||
|
||||
// Closure for iterating over object fields during concurrent marking
|
||||
class G1CMOopClosure : public ExtendedOopClosure {
|
||||
class G1CMOopClosure : public MetadataAwareOopClosure {
|
||||
protected:
|
||||
ConcurrentMark* _cm;
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
ConcurrentMark* _cm;
|
||||
CMTask* _task;
|
||||
public:
|
||||
G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
|
||||
@ -173,7 +180,7 @@ public:
|
||||
};
|
||||
|
||||
// Closure to scan the root regions during concurrent marking
|
||||
class G1RootRegionScanClosure : public ExtendedOopClosure {
|
||||
class G1RootRegionScanClosure : public MetadataAwareOopClosure {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
ConcurrentMark* _cm;
|
||||
|
@ -28,9 +28,11 @@
|
||||
#include "gc_implementation/g1/concurrentMark.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.hpp"
|
||||
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
|
||||
/*
|
||||
@ -107,10 +109,6 @@ inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
|
||||
|
||||
template <class T>
|
||||
inline void G1CMOopClosure::do_oop_nv(T* p) {
|
||||
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
|
||||
assert(!_g1h->is_on_master_free_list(
|
||||
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
|
||||
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] we're looking at location "
|
||||
|
@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/oop.pcgc.inline.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
|
||||
: _g1h(g1h),
|
||||
_refs(g1h->task_queue(queue_num)),
|
||||
_dcq(&g1h->dirty_card_queue_set()),
|
||||
_ct_bs(g1h->g1_barrier_set()),
|
||||
_g1_rem(g1h->g1_rem_set()),
|
||||
_hash_seed(17), _queue_num(queue_num),
|
||||
_term_attempts(0),
|
||||
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
|
||||
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
|
||||
_age_table(false), _scanner(g1h, rp),
|
||||
_strong_roots_time(0), _term_time(0),
|
||||
_alloc_buffer_waste(0), _undo_waste(0) {
|
||||
_scanner.set_par_scan_thread_state(this);
|
||||
// we allocate G1YoungSurvRateNumRegions plus one entries, since
|
||||
// we "sacrifice" entry 0 to keep track of surviving bytes for
|
||||
// non-young regions (where the age is -1)
|
||||
// We also add a few elements at the beginning and at the end in
|
||||
// an attempt to eliminate cache contention
|
||||
uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
|
||||
uint array_length = PADDING_ELEM_NUM +
|
||||
real_length +
|
||||
PADDING_ELEM_NUM;
|
||||
_surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
|
||||
if (_surviving_young_words_base == NULL)
|
||||
vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
|
||||
"Not enough space for young surv histo.");
|
||||
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
|
||||
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
|
||||
|
||||
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
|
||||
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
|
||||
|
||||
_start = os::elapsedTime();
|
||||
}
|
||||
|
||||
G1ParScanThreadState::~G1ParScanThreadState() {
|
||||
retire_alloc_buffers();
|
||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
|
||||
}
|
||||
|
||||
void
|
||||
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
|
||||
{
|
||||
st->print_raw_cr("GC Termination Stats");
|
||||
st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
|
||||
" ------waste (KiB)------");
|
||||
st->print_raw_cr("thr ms ms % ms % attempts"
|
||||
" total alloc undo");
|
||||
st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
|
||||
" ------- ------- -------");
|
||||
}
|
||||
|
||||
void
|
||||
G1ParScanThreadState::print_termination_stats(int i,
|
||||
outputStream* const st) const
|
||||
{
|
||||
const double elapsed_ms = elapsed_time() * 1000.0;
|
||||
const double s_roots_ms = strong_roots_time() * 1000.0;
|
||||
const double term_ms = term_time() * 1000.0;
|
||||
st->print_cr("%3d %9.2f %9.2f %6.2f "
|
||||
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
|
||||
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
|
||||
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
|
||||
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
|
||||
(alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
|
||||
alloc_buffer_waste() * HeapWordSize / K,
|
||||
undo_waste() * HeapWordSize / K);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
|
||||
assert(ref != NULL, "invariant");
|
||||
assert(UseCompressedOops, "sanity");
|
||||
assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
|
||||
oop p = oopDesc::load_decode_heap_oop(ref);
|
||||
assert(_g1h->is_in_g1_reserved(p),
|
||||
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool G1ParScanThreadState::verify_ref(oop* ref) const {
|
||||
assert(ref != NULL, "invariant");
|
||||
if (has_partial_array_mask(ref)) {
|
||||
// Must be in the collection set--it's already been copied.
|
||||
oop p = clear_partial_array_mask(ref);
|
||||
assert(_g1h->obj_in_cs(p),
|
||||
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
|
||||
} else {
|
||||
oop p = oopDesc::load_decode_heap_oop(ref);
|
||||
assert(_g1h->is_in_g1_reserved(p),
|
||||
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool G1ParScanThreadState::verify_task(StarTask ref) const {
|
||||
if (ref.is_narrow()) {
|
||||
return verify_ref((narrowOop*) ref);
|
||||
} else {
|
||||
return verify_ref((oop*) ref);
|
||||
}
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
void G1ParScanThreadState::trim_queue() {
|
||||
assert(_evac_failure_cl != NULL, "not set");
|
||||
|
||||
StarTask ref;
|
||||
do {
|
||||
// Drain the overflow stack first, so other threads can steal.
|
||||
while (_refs->pop_overflow(ref)) {
|
||||
dispatch_reference(ref);
|
||||
}
|
||||
|
||||
while (_refs->pop_local(ref)) {
|
||||
dispatch_reference(ref);
|
||||
}
|
||||
} while (!_refs->is_empty());
|
||||
}
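// Illustrative sketch of the draining order used by trim_queue() above: each
// worker owns a bounded queue that other workers can steal from, plus a
// private overflow stack they cannot see, so the hidden overflow work is
// drained first and its children re-enter the stealable queue. All types here
// are stand-ins for the HotSpot task queue classes.

#include <stack>
#include <vector>

struct Task { int dummy; };

struct WorkQueue {
  std::vector<Task> local;      // stealable part (bounded in the real code)
  std::stack<Task>  overflow;   // private spill-over, invisible to thieves

  bool pop_overflow(Task& t) {
    if (overflow.empty()) return false;
    t = overflow.top();
    overflow.pop();
    return true;
  }
  bool pop_local(Task& t) {
    if (local.empty()) return false;
    t = local.back();
    local.pop_back();
    return true;
  }
  bool is_empty() const { return local.empty() && overflow.empty(); }
};

void trim_queue(WorkQueue& q, void (*process)(const Task&)) {
  Task t;
  do {
    while (q.pop_overflow(t)) { process(t); }   // hidden work first
    while (q.pop_local(t))    { process(t); }
  } while (!q.is_empty());
}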
|
||||
|
||||
oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
||||
size_t word_sz = old->size();
|
||||
HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
|
||||
// +1 to make the -1 indexes valid...
|
||||
int young_index = from_region->young_index_in_cset()+1;
|
||||
assert( (from_region->is_young() && young_index > 0) ||
|
||||
(!from_region->is_young() && young_index == 0), "invariant" );
|
||||
G1CollectorPolicy* g1p = _g1h->g1_policy();
|
||||
markOop m = old->mark();
|
||||
int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
|
||||
: m->age();
|
||||
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
|
||||
word_sz);
|
||||
HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
|
||||
#ifndef PRODUCT
|
||||
// Should this evacuation fail?
|
||||
if (_g1h->evacuation_should_fail()) {
|
||||
if (obj_ptr != NULL) {
|
||||
undo_allocation(alloc_purpose, obj_ptr, word_sz);
|
||||
obj_ptr = NULL;
|
||||
}
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
if (obj_ptr == NULL) {
|
||||
// This will either forward-to-self, or detect that someone else has
|
||||
// installed a forwarding pointer.
|
||||
return _g1h->handle_evacuation_failure_par(this, old);
|
||||
}
|
||||
|
||||
oop obj = oop(obj_ptr);
|
||||
|
||||
// We're going to allocate linearly, so might as well prefetch ahead.
|
||||
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
|
||||
|
||||
oop forward_ptr = old->forward_to_atomic(obj);
|
||||
if (forward_ptr == NULL) {
|
||||
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
|
||||
|
||||
// alloc_purpose is just a hint to allocate() above, recheck the type of region
|
||||
// we actually allocated from and update alloc_purpose accordingly
|
||||
HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
|
||||
alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
|
||||
|
||||
if (g1p->track_object_age(alloc_purpose)) {
|
||||
// We could simply do obj->incr_age(). However, this causes a
|
||||
// performance issue. obj->incr_age() will first check whether
|
||||
// the object has a displaced mark by checking its mark word;
|
||||
// getting the mark word from the new location of the object
|
||||
// stalls. So, given that we already have the mark word and we
|
||||
// are about to install it anyway, it's better to increase the
|
||||
// age on the mark word, when the object does not have a
|
||||
// displaced mark word. We're not expecting many objects to have
|
||||
// a displaced marked word, so that case is not optimized
|
||||
// further (it could be...) and we simply call obj->incr_age().
|
||||
|
||||
if (m->has_displaced_mark_helper()) {
|
||||
// in this case, we have to install the mark word first,
|
||||
// otherwise obj looks to be forwarded (the old mark word,
|
||||
// which contains the forward pointer, was copied)
|
||||
obj->set_mark(m);
|
||||
obj->incr_age();
|
||||
} else {
|
||||
m = m->incr_age();
|
||||
obj->set_mark(m);
|
||||
}
|
||||
age_table()->add(obj, word_sz);
|
||||
} else {
|
||||
obj->set_mark(m);
|
||||
}
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
|
||||
to_region->is_young(),
|
||||
queue_num(),
|
||||
obj);
|
||||
}
|
||||
|
||||
size_t* surv_young_words = surviving_young_words();
|
||||
surv_young_words[young_index] += word_sz;
|
||||
|
||||
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
|
||||
// We keep track of the next start index in the length field of
|
||||
// the to-space object. The actual length can be found in the
|
||||
// length field of the from-space object.
|
||||
arrayOop(obj)->set_length(0);
|
||||
oop* old_p = set_partial_array_mask(old);
|
||||
push_on_queue(old_p);
|
||||
} else {
|
||||
// No point in using the slower heap_region_containing() method,
|
||||
// given that we know obj is in the heap.
|
||||
_scanner.set_region(_g1h->heap_region_containing_raw(obj));
|
||||
obj->oop_iterate_backwards(&_scanner);
|
||||
}
|
||||
} else {
|
||||
undo_allocation(alloc_purpose, obj_ptr, word_sz);
|
||||
obj = forward_ptr;
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = NULL;
|
||||
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
|
||||
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
|
||||
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
||||
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
||||
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
|
||||
|
||||
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
|
||||
if (buf == NULL) {
|
||||
return NULL; // Let caller handle allocation failure.
|
||||
}
|
||||
// Otherwise.
|
||||
alloc_buf->set_word_size(gclab_word_size);
|
||||
alloc_buf->set_buf(buf);
|
||||
|
||||
obj = alloc_buf->allocate(word_sz);
|
||||
assert(obj != NULL, "buffer was definitely big enough...");
|
||||
} else {
|
||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
||||
}
|
||||
return obj;
|
||||
}
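// Illustrative sketch of the refill policy in allocate_slow() above: a request
// that is small compared to the buffer size is worth retiring the current
// thread-local buffer for (its leftover space is booked as waste), while a
// large request bypasses the buffer and goes straight to the shared allocator.
// The sizes, the 10% threshold and shared_allocate() are stand-ins.

#include <cstddef>

struct Plab {
  size_t remaining;   // free words left in the current thread-local buffer
  size_t wasted;      // words thrown away when a buffer was retired early
};

// Stand-in for the slow, shared allocation path.
static bool shared_allocate(size_t /* words */) { return true; }

bool plab_allocate(Plab& plab, size_t word_size,
                   size_t plab_word_size = 4096, size_t waste_pct = 10) {
  if (plab.remaining >= word_size) {        // fast path: bump inside the buffer
    plab.remaining -= word_size;
    return true;
  }
  if (word_size * 100 < plab_word_size * waste_pct) {
    // Small request: retire the old buffer (its leftover becomes waste),
    // grab a fresh buffer and allocate inside it.
    if (!shared_allocate(plab_word_size)) return false;
    plab.wasted    += plab.remaining;
    plab.remaining  = plab_word_size - word_size;
    return true;
  }
  // Large request: allocate it directly and keep the current buffer as it is.
  return shared_allocate(word_size);
}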
|
||||
|
||||
void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
|
||||
if (alloc_buffer(purpose)->contains(obj)) {
|
||||
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
|
||||
"should contain whole object");
|
||||
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
|
||||
} else {
|
||||
CollectedHeap::fill_with_object(obj, word_sz);
|
||||
add_to_undo_waste(word_sz);
|
||||
}
|
||||
}
|
||||
|
||||
HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
|
||||
if (obj != NULL) {
|
||||
return obj;
|
||||
}
|
||||
return allocate_slow(purpose, word_sz);
|
||||
}
|
||||
|
||||
void G1ParScanThreadState::retire_alloc_buffers() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
size_t waste = _alloc_buffers[ap]->words_remaining();
|
||||
add_to_alloc_buffer_waste(waste);
|
||||
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
|
||||
true /* end_of_gc */,
|
||||
false /* retain */);
|
||||
}
|
||||
}
|
@ -0,0 +1,227 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
|
||||
|
||||
#include "gc_implementation/g1/dirtyCardQueue.hpp"
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/shared/ageTable.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
|
||||
class HeapRegion;
|
||||
class outputStream;
|
||||
|
||||
class G1ParScanThreadState : public StackObj {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
RefToScanQueue* _refs;
|
||||
DirtyCardQueue _dcq;
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
G1RemSet* _g1_rem;
|
||||
|
||||
G1ParGCAllocBuffer _surviving_alloc_buffer;
|
||||
G1ParGCAllocBuffer _tenured_alloc_buffer;
|
||||
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
|
||||
ageTable _age_table;
|
||||
|
||||
G1ParScanClosure _scanner;
|
||||
|
||||
size_t _alloc_buffer_waste;
|
||||
size_t _undo_waste;
|
||||
|
||||
OopsInHeapRegionClosure* _evac_failure_cl;
|
||||
|
||||
int _hash_seed;
|
||||
uint _queue_num;
|
||||
|
||||
size_t _term_attempts;
|
||||
|
||||
double _start;
|
||||
double _start_strong_roots;
|
||||
double _strong_roots_time;
|
||||
double _start_term;
|
||||
double _term_time;
|
||||
|
||||
// Map from young-age-index (0 == not young, 1 is youngest) to
|
||||
// surviving words. base is what we get back from the malloc call
|
||||
size_t* _surviving_young_words_base;
|
||||
// this points into the array, as we use the first few entries for padding
|
||||
size_t* _surviving_young_words;
|
||||
|
||||
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
|
||||
|
||||
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
||||
|
||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||
|
||||
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
||||
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
|
||||
|
||||
  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points to the same region or
    // is the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }
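deferred_rs_update() above boils down to: compute the card covering the updated field, mark it at most once, and queue it for later refinement instead of updating the remembered set immediately. A small single-threaded sketch of that card-index-and-enqueue idea, with an invented ToyCardTable standing in for the real G1SATBCardTableModRefBS/DirtyCardQueue API (512-byte cards, as in HotSpot's card table):

#include <cstdint>
#include <vector>

// Hypothetical flat card table: one byte per 512-byte card.
struct ToyCardTable {
  static constexpr uintptr_t kCardShift = 9;              // 2^9 = 512 bytes per card
  uintptr_t heap_base;
  std::vector<uint8_t> cards;                             // 0 = clean, 1 = deferred

  size_t index_for(const void* field_addr) const {
    return (reinterpret_cast<uintptr_t>(field_addr) - heap_base) >> kCardShift;
  }
  // Returns true only the first time the card is marked (not atomic; a sketch).
  bool mark_card_deferred(size_t card_index) {
    if (cards[card_index] != 0) return false;
    cards[card_index] = 1;
    return true;
  }
};

// Defer the remembered-set update for a modified reference field: mark its card
// and enqueue the card exactly once for later processing.
void defer_rs_update(ToyCardTable& ct, std::vector<size_t>& dirty_card_queue,
                     const void* field_addr) {
  size_t card = ct.index_for(field_addr);
  if (ct.mark_card_deferred(card)) {
    dirty_card_queue.push_back(card);
  }
}

int main() {
  ToyCardTable ct;
  ct.heap_base = 0;                                        // pretend the heap starts at 0
  ct.cards.assign(1024, 0);
  std::vector<size_t> dcq;
  defer_rs_update(ct, dcq, reinterpret_cast<void*>(600));  // card 1
  defer_rs_update(ct, dcq, reinterpret_cast<void*>(700));  // same card, not re-enqueued
  return dcq.size() == 1 ? 0 : 1;
}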
|
||||
|
||||
public:
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
|
||||
~G1ParScanThreadState();
|
||||
|
||||
ageTable* age_table() { return &_age_table; }
|
||||
|
||||
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||
return _alloc_buffers[purpose];
|
||||
}
|
||||
|
||||
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
|
||||
size_t undo_waste() const { return _undo_waste; }
|
||||
|
||||
#ifdef ASSERT
|
||||
bool queue_is_empty() const { return _refs->is_empty(); }
|
||||
|
||||
bool verify_ref(narrowOop* ref) const;
|
||||
bool verify_ref(oop* ref) const;
|
||||
bool verify_task(StarTask ref) const;
|
||||
#endif // ASSERT
|
||||
|
||||
template <class T> void push_on_queue(T* ref) {
|
||||
assert(verify_ref(ref), "sanity");
|
||||
_refs->push(ref);
|
||||
}
|
||||
|
||||
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
|
||||
|
||||
private:
|
||||
|
||||
inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
|
||||
inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
|
||||
inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
|
||||
|
||||
public:
|
||||
|
||||
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
|
||||
_evac_failure_cl = evac_failure_cl;
|
||||
}
|
||||
|
||||
OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
|
||||
|
||||
int* hash_seed() { return &_hash_seed; }
|
||||
uint queue_num() { return _queue_num; }
|
||||
|
||||
size_t term_attempts() const { return _term_attempts; }
|
||||
void note_term_attempt() { _term_attempts++; }
|
||||
|
||||
void start_strong_roots() {
|
||||
_start_strong_roots = os::elapsedTime();
|
||||
}
|
||||
void end_strong_roots() {
|
||||
_strong_roots_time += (os::elapsedTime() - _start_strong_roots);
|
||||
}
|
||||
double strong_roots_time() const { return _strong_roots_time; }
|
||||
|
||||
void start_term_time() {
|
||||
note_term_attempt();
|
||||
_start_term = os::elapsedTime();
|
||||
}
|
||||
void end_term_time() {
|
||||
_term_time += (os::elapsedTime() - _start_term);
|
||||
}
|
||||
double term_time() const { return _term_time; }
|
||||
|
||||
double elapsed_time() const {
|
||||
return os::elapsedTime() - _start;
|
||||
}
|
||||
|
||||
static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
|
||||
void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
|
||||
|
||||
size_t* surviving_young_words() {
|
||||
// We add on to hide entry 0 which accumulates surviving words for
|
||||
// age -1 regions (i.e. non-young ones)
|
||||
return _surviving_young_words;
|
||||
}
|
||||
|
||||
private:
|
||||
void retire_alloc_buffers();
|
||||
|
||||
#define G1_PARTIAL_ARRAY_MASK 0x2

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oop, to allow the
  // specialization for has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops are caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }
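The three helpers above are a pointer-tagging scheme: a task queue entry whose 0x2 bit is set is not a field address but an encoded "partial object array" task. A standalone illustration of the same encode/test/decode round trip on a raw pointer, assuming only that object addresses are at least 4-byte aligned so the 0x2 bit is free (the function names mirror the ones above but operate on plain void*, not oops):

#include <cassert>
#include <cstdint>

static const uintptr_t kPartialArrayMask = 0x2;   // mirrors G1_PARTIAL_ARRAY_MASK

// Tag a (suitably aligned) object pointer so it can be told apart from a
// plain field address when it is later popped off a work queue.
inline void* set_partial_array_mask(void* obj) {
  assert((reinterpret_cast<uintptr_t>(obj) & kPartialArrayMask) == 0 &&
         "tag bit must be free, otherwise information is lost");
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) | kPartialArrayMask);
}

inline bool has_partial_array_mask(const void* ref) {
  return (reinterpret_cast<uintptr_t>(ref) & kPartialArrayMask) == kPartialArrayMask;
}

inline void* clear_partial_array_mask(void* ref) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ref) & ~kPartialArrayMask);
}

int main() {
  int dummy_object[4] = {0};                      // stands in for a heap object
  void* tagged = set_partial_array_mask(dummy_object);
  assert(has_partial_array_mask(tagged));
  assert(clear_partial_array_mask(tagged) == dummy_object);
  return 0;
}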
|
||||
|
||||
inline void do_oop_partial_array(oop* p);
|
||||
|
||||
// This method is applied to the fields of the objects that have just been copied.
|
||||
template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
|
||||
|
||||
template <class T> inline void deal_with_reference(T* ref_to_scan);
|
||||
|
||||
inline void dispatch_reference(StarTask ref);
|
||||
public:
|
||||
|
||||
oop copy_to_survivor_space(oop const obj);
|
||||
|
||||
void trim_queue();
|
||||
|
||||
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
|
@ -0,0 +1,154 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
         "Reference should not be NULL here as such are never pushed to the task queue.");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times. So redo this check.
  if (_g1h->in_cset_fast_test(obj)) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
  }

  assert(obj != NULL, "Must be");
  update_rs(from, p, queue_num());
}
|
||||
|
||||
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}
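do_oop_partial_array() above processes a large object array in bounded chunks and re-pushes the remainder so other workers can steal it; the to-space copy's length field doubles as the "next start index" cursor. A simplified, single-queue sketch of the same chunking discipline, using an invented ArrayTask/kChunk instead of the real oop types and ParGCArrayScanChunk:

#include <cstdio>
#include <deque>

struct ArrayTask { int* data; int next; int length; };     // hypothetical work item

static const int kChunk = 4;                                // plays the role of ParGCArrayScanChunk

// Process one chunk of the array; if a lot remains, push the rest back so it
// can be picked up (or stolen) later instead of monopolizing this worker.
void process_partial_array(std::deque<ArrayTask>& queue, ArrayTask task) {
  int start = task.next;
  int end   = task.length;
  if (end - start > 2 * kChunk) {
    end = start + kChunk;
    queue.push_back(ArrayTask{task.data, end, task.length}); // remainder first
  }
  for (int i = start; i < end; ++i) {
    std::printf("scan element %d = %d\n", i, task.data[i]);  // stand-in for scanning
  }
}

int main() {
  int elems[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  std::deque<ArrayTask> queue;
  queue.push_back(ArrayTask{elems, 0, 10});
  while (!queue.empty()) {
    ArrayTask t = queue.front();
    queue.pop_front();
    process_partial_array(queue, t);
  }
  return 0;
}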
|
||||
|
||||
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "obj_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}

#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
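steal_and_trim_queue() above is the standard steal-then-drain loop: steal one task from some other worker's queue, process it, then fully drain the local queue (which the stolen task may have refilled) before trying to steal again. A compact single-threaded illustration of that shape, with a plain int standing in for StarTask and the victims visited in order rather than picked randomly as the real steal() does:

#include <cstdio>
#include <deque>
#include <vector>

using Task = int;                                        // stand-in for StarTask

// Drain the local queue completely (trim_queue() in the code above).
void trim_queue(std::deque<Task>& local) {
  while (!local.empty()) {
    Task t = local.back();
    local.pop_back();
    std::printf("process local task %d\n", t);
    // In the real collector, processing may push new tasks onto 'local'.
  }
}

// Keep stealing from the other queues; after every successful steal, drain
// whatever follow-on work it produced before looking for more to steal.
void steal_and_trim(std::deque<Task>& local, std::vector<std::deque<Task>>& others) {
  for (auto& victim : others) {
    while (!victim.empty()) {
      Task stolen = victim.front();                      // "steal" from the victim
      victim.pop_front();
      std::printf("process stolen task %d\n", stolen);
      trim_queue(local);                                 // drain any follow-on work
    }
  }
}

int main() {
  std::deque<Task> local;
  std::vector<std::deque<Task>> others(2);
  others[0] = {1, 2};
  others[1] = {3};
  steal_and_trim(local, others);
  return 0;
}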
|
||||
|
@ -26,6 +26,7 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1RemSet.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
|
@ -66,6 +66,17 @@ G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
|
||||
}
|
||||
}
|
||||
|
||||
void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
|
||||
if (!dest_uninitialized) {
|
||||
write_ref_array_pre_work(dst, count);
|
||||
}
|
||||
}
|
||||
void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
|
||||
if (!dest_uninitialized) {
|
||||
write_ref_array_pre_work(dst, count);
|
||||
}
|
||||
}
|
||||
|
||||
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
|
||||
jbyte val = _byte_map[card_index];
|
||||
// It's already processed
|
||||
|
@ -86,16 +86,8 @@ public:
|
||||
}
|
||||
|
||||
template <class T> void write_ref_array_pre_work(T* dst, int count);
|
||||
virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
|
||||
if (!dest_uninitialized) {
|
||||
write_ref_array_pre_work(dst, count);
|
||||
}
|
||||
}
|
||||
virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
|
||||
if (!dest_uninitialized) {
|
||||
write_ref_array_pre_work(dst, count);
|
||||
}
|
||||
}
|
||||
virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
|
||||
virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
|
||||
|
||||
/*
|
||||
Claimed and deferred bits are used together in G1 during the evacuation
|
||||
|
@ -30,14 +30,21 @@
|
||||
// non-virtually, using a mechanism defined in this file. Extend these
|
||||
// macros in the obvious way to add specializations for new closures.
|
||||
|
||||
// Forward declarations.
|
||||
enum G1Barrier {
|
||||
G1BarrierNone,
|
||||
G1BarrierEvac,
|
||||
G1BarrierKlass
|
||||
};
|
||||
|
||||
template<G1Barrier barrier, bool do_mark_object>
|
||||
enum G1Mark {
|
||||
G1MarkNone,
|
||||
G1MarkFromRoot,
|
||||
G1MarkPromotedFromRoot
|
||||
};
|
||||
|
||||
// Forward declarations.
|
||||
|
||||
template<G1Barrier barrier, G1Mark do_mark_object>
|
||||
class G1ParCopyClosure;
|
||||
|
||||
class G1ParScanClosure;
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "gc_implementation/g1/heapRegion.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
#include "gc_implementation/shared/liveRange.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "memory/space.inline.hpp"
|
||||
@ -61,7 +62,7 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
|
||||
HeapRegion* hr,
|
||||
HeapWord* cur, HeapWord* top) {
|
||||
oop cur_oop = oop(cur);
|
||||
int oop_size = cur_oop->size();
|
||||
size_t oop_size = hr->block_size(cur);
|
||||
HeapWord* next_obj = cur + oop_size;
|
||||
while (next_obj < top) {
|
||||
// Keep filtering the remembered set.
|
||||
@ -72,7 +73,7 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
|
||||
}
|
||||
cur = next_obj;
|
||||
cur_oop = oop(cur);
|
||||
oop_size = cur_oop->size();
|
||||
oop_size = hr->block_size(cur);
|
||||
next_obj = cur + oop_size;
|
||||
}
|
||||
return cur;
|
||||
@ -82,7 +83,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
|
||||
HeapWord* bottom,
|
||||
HeapWord* top) {
|
||||
G1CollectedHeap* g1h = _g1;
|
||||
int oop_size;
|
||||
size_t oop_size;
|
||||
ExtendedOopClosure* cl2 = NULL;
|
||||
|
||||
FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
|
||||
@ -102,7 +103,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
|
||||
if (!g1h->is_obj_dead(oop(bottom), _hr)) {
|
||||
oop_size = oop(bottom)->oop_iterate(cl2, mr);
|
||||
} else {
|
||||
oop_size = oop(bottom)->size();
|
||||
oop_size = _hr->block_size(bottom);
|
||||
}
|
||||
|
||||
bottom += oop_size;
|
||||
@ -374,7 +375,7 @@ HeapRegion::HeapRegion(uint hrs_index,
|
||||
// region.
|
||||
hr_clear(false /*par*/, false /*clear_space*/);
|
||||
set_top(bottom());
|
||||
set_saved_mark();
|
||||
record_top_and_timestamp();
|
||||
|
||||
assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
|
||||
}
|
||||
@ -394,38 +395,11 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void HeapRegion::save_marks() {
|
||||
set_saved_mark();
|
||||
}
|
||||
|
||||
void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
|
||||
HeapWord* p = mr.start();
|
||||
HeapWord* e = mr.end();
|
||||
oop obj;
|
||||
while (p < e) {
|
||||
obj = oop(p);
|
||||
p += obj->oop_iterate(cl);
|
||||
}
|
||||
assert(p == e, "bad memregion: doesn't end on obj boundary");
|
||||
}
|
||||
|
||||
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
|
||||
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
|
||||
ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
|
||||
}
|
||||
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
|
||||
|
||||
|
||||
void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
|
||||
oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
|
||||
}
|
||||
|
||||
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
|
||||
bool during_conc_mark) {
|
||||
// We always recreate the prev marking info and we'll explicitly
|
||||
// mark all objects we find to be self-forwarded on the prev
|
||||
// bitmap. So all objects need to be below PTAMS.
|
||||
_prev_top_at_mark_start = top();
|
||||
_prev_marked_bytes = 0;
|
||||
|
||||
if (during_initial_mark) {
|
||||
@ -449,6 +423,7 @@ void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
|
||||
assert(0 <= marked_bytes && marked_bytes <= used(),
|
||||
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
|
||||
marked_bytes, used()));
|
||||
_prev_top_at_mark_start = top();
|
||||
_prev_marked_bytes = marked_bytes;
|
||||
}
|
||||
|
||||
@ -476,7 +451,7 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
|
||||
} else if (!g1h->is_obj_dead(obj)) {
|
||||
cl->do_object(obj);
|
||||
}
|
||||
cur += obj->size();
|
||||
cur += block_size(cur);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -548,7 +523,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
return cur;
|
||||
}
|
||||
// Otherwise...
|
||||
next = (cur + obj->size());
|
||||
next = cur + block_size(cur);
|
||||
}
|
||||
|
||||
// If we finish the above loop...We have a parseable object that
|
||||
@ -556,10 +531,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
// inside or spans the entire region.
|
||||
|
||||
assert(obj == oop(cur), "sanity");
|
||||
assert(cur <= start &&
|
||||
obj->klass_or_null() != NULL &&
|
||||
(cur + obj->size()) > start,
|
||||
"Loop postcondition");
|
||||
assert(cur <= start, "Loop postcondition");
|
||||
assert(obj->klass_or_null() != NULL, "Loop postcondition");
|
||||
assert((cur + block_size(cur)) > start, "Loop postcondition");
|
||||
|
||||
if (!g1h->is_obj_dead(obj)) {
|
||||
obj->oop_iterate(cl, mr);
|
||||
@ -573,7 +547,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
};
|
||||
|
||||
// Otherwise:
|
||||
next = (cur + obj->size());
|
||||
next = cur + block_size(cur);
|
||||
|
||||
if (!g1h->is_obj_dead(obj)) {
|
||||
if (next < end || !obj->is_objArray()) {
|
||||
@ -928,10 +902,11 @@ void HeapRegion::verify(VerifyOption vo,
|
||||
size_t object_num = 0;
|
||||
while (p < top()) {
|
||||
oop obj = oop(p);
|
||||
size_t obj_size = obj->size();
|
||||
size_t obj_size = block_size(p);
|
||||
object_num += 1;
|
||||
|
||||
if (is_humongous != g1->isHumongous(obj_size)) {
|
||||
if (is_humongous != g1->isHumongous(obj_size) &&
|
||||
!g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
|
||||
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
|
||||
SIZE_FORMAT" words) in a %shumongous region",
|
||||
p, g1->isHumongous(obj_size) ? "" : "non-",
|
||||
@ -942,7 +917,9 @@ void HeapRegion::verify(VerifyOption vo,
|
||||
|
||||
// If it returns false, verify_for_object() will output the
|
||||
// appropriate messasge.
|
||||
if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
|
||||
if (do_bot_verify &&
|
||||
!g1->is_obj_dead(obj, this) &&
|
||||
!_offsets.verify_for_object(p, obj_size)) {
|
||||
*failures = true;
|
||||
return;
|
||||
}
|
||||
@ -950,7 +927,10 @@ void HeapRegion::verify(VerifyOption vo,
|
||||
if (!g1->is_obj_dead_cond(obj, this, vo)) {
|
||||
if (obj->is_oop()) {
|
||||
Klass* klass = obj->klass();
|
||||
if (!klass->is_metaspace_object()) {
|
||||
bool is_metaspace_object = Metaspace::contains(klass) ||
|
||||
(vo == VerifyOption_G1UsePrevMarking &&
|
||||
ClassLoaderDataGraph::unload_list_contains(klass));
|
||||
if (!is_metaspace_object) {
|
||||
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
|
||||
"not metadata", klass, (void *)obj);
|
||||
*failures = true;
|
||||
@ -1064,7 +1044,9 @@ void HeapRegion::verify() const {
|
||||
// away eventually.
|
||||
|
||||
void G1OffsetTableContigSpace::clear(bool mangle_space) {
|
||||
ContiguousSpace::clear(mangle_space);
|
||||
set_top(bottom());
|
||||
set_saved_mark_word(bottom());
|
||||
CompactibleSpace::clear(mangle_space);
|
||||
_offsets.zero_bottom_entry();
|
||||
_offsets.initialize_threshold();
|
||||
}
|
||||
@ -1102,10 +1084,10 @@ HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
|
||||
if (_gc_time_stamp < g1h->get_gc_time_stamp())
|
||||
return top();
|
||||
else
|
||||
return ContiguousSpace::saved_mark_word();
|
||||
return Space::saved_mark_word();
|
||||
}
|
||||
|
||||
void G1OffsetTableContigSpace::set_saved_mark() {
|
||||
void G1OffsetTableContigSpace::record_top_and_timestamp() {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
|
||||
|
||||
@ -1117,7 +1099,7 @@ void G1OffsetTableContigSpace::set_saved_mark() {
|
||||
// of region. If it does so after _gc_time_stamp = ..., then it
|
||||
// will pick up the right saved_mark_word() as the high water mark
|
||||
// of the region. Either way, the behavior will be correct.
|
||||
ContiguousSpace::set_saved_mark();
|
||||
Space::set_saved_mark_word(top());
|
||||
OrderAccess::storestore();
|
||||
_gc_time_stamp = curr_gc_time_stamp;
|
||||
// No need to do another barrier to flush the writes above. If
|
||||
@ -1128,6 +1110,26 @@ void G1OffsetTableContigSpace::set_saved_mark() {
|
||||
}
|
||||
}
|
||||
|
||||
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
|
||||
object_iterate(blk);
|
||||
}
|
||||
|
||||
void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
|
||||
HeapWord* p = bottom();
|
||||
while (p < top()) {
|
||||
if (block_is_obj(p)) {
|
||||
blk->do_object(oop(p));
|
||||
}
|
||||
p += block_size(p);
|
||||
}
|
||||
}
|
||||
|
||||
#define block_is_always_obj(q) true
|
||||
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
|
||||
SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
|
||||
}
|
||||
#undef block_is_always_obj
|
||||
|
||||
G1OffsetTableContigSpace::
|
||||
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
MemRegion mr) :
|
||||
@ -1137,7 +1139,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
{
|
||||
_offsets.set_space(this);
|
||||
// false ==> we'll do the clearing if there's clearing to be done.
|
||||
ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
|
||||
CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
|
||||
_top = bottom();
|
||||
_offsets.zero_bottom_entry();
|
||||
_offsets.initialize_threshold();
|
||||
}
|
||||
|
@ -46,8 +46,6 @@
|
||||
// The solution is to remove this method from the definition
|
||||
// of a Space.
|
||||
|
||||
class CompactibleSpace;
|
||||
class ContiguousSpace;
|
||||
class HeapRegionRemSet;
|
||||
class HeapRegionRemSetIterator;
|
||||
class HeapRegion;
|
||||
@ -125,9 +123,9 @@ public:
|
||||
// the regions anyway) and at the end of a Full GC. The current scheme
|
||||
// that uses sequential unsigned ints will fail only if we have 4b
|
||||
// evacuation pauses between two cleanups, which is _highly_ unlikely.
|
||||
|
||||
class G1OffsetTableContigSpace: public ContiguousSpace {
|
||||
class G1OffsetTableContigSpace: public CompactibleSpace {
|
||||
friend class VMStructs;
|
||||
HeapWord* _top;
|
||||
protected:
|
||||
G1BlockOffsetArrayContigSpace _offsets;
|
||||
Mutex _par_alloc_lock;
|
||||
@ -144,11 +142,32 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
|
||||
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
MemRegion mr);
|
||||
|
||||
void set_top(HeapWord* value) { _top = value; }
|
||||
HeapWord* top() const { return _top; }
|
||||
|
||||
protected:
|
||||
HeapWord** top_addr() { return &_top; }
|
||||
// Allocation helpers (return NULL if full).
|
||||
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
|
||||
inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
|
||||
|
||||
public:
|
||||
void reset_after_compaction() { set_top(compaction_top()); }
|
||||
|
||||
size_t used() const { return byte_size(bottom(), top()); }
|
||||
size_t free() const { return byte_size(top(), end()); }
|
||||
bool is_free_block(const HeapWord* p) const { return p >= top(); }
|
||||
|
||||
MemRegion used_region() const { return MemRegion(bottom(), top()); }
|
||||
|
||||
void object_iterate(ObjectClosure* blk);
|
||||
void safe_object_iterate(ObjectClosure* blk);
|
||||
|
||||
void set_bottom(HeapWord* value);
|
||||
void set_end(HeapWord* value);
|
||||
|
||||
virtual HeapWord* saved_mark_word() const;
|
||||
virtual void set_saved_mark();
|
||||
void record_top_and_timestamp();
|
||||
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
|
||||
unsigned get_gc_time_stamp() { return _gc_time_stamp; }
|
||||
|
||||
@ -168,6 +187,8 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
|
||||
HeapWord* block_start(const void* p);
|
||||
HeapWord* block_start_const(const void* p) const;
|
||||
|
||||
void prepare_for_compaction(CompactPoint* cp);
|
||||
|
||||
// Add offset table update.
|
||||
virtual HeapWord* allocate(size_t word_size);
|
||||
HeapWord* par_allocate(size_t word_size);
|
||||
@ -202,10 +223,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
ContinuesHumongous
|
||||
};
|
||||
|
||||
// Requires that the region "mr" be dense with objects, and begin and end
|
||||
// with an object.
|
||||
void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl);
|
||||
|
||||
// The remembered set for this region.
|
||||
// (Might want to make this "inline" later, to avoid some alloc failure
|
||||
// issues.)
|
||||
@ -230,11 +247,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
bool _evacuation_failed;
|
||||
|
||||
// A heap region may be a member one of a number of special subsets, each
|
||||
// represented as linked lists through the field below. Currently, these
|
||||
// sets include:
|
||||
// represented as linked lists through the field below. Currently, there
|
||||
// is only one set:
|
||||
// The collection set.
|
||||
// The set of allocation regions used in a collection pause.
|
||||
// Spaces that may contain gray objects.
|
||||
HeapRegion* _next_in_special_set;
|
||||
|
||||
// next region in the young "generation" region set
|
||||
@ -353,14 +368,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
ParMarkRootClaimValue = 9
|
||||
};
|
||||
|
||||
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
|
||||
assert(is_young(), "we can only skip BOT updates on young regions");
|
||||
return ContiguousSpace::par_allocate(word_size);
|
||||
}
|
||||
inline HeapWord* allocate_no_bot_updates(size_t word_size) {
|
||||
assert(is_young(), "we can only skip BOT updates on young regions");
|
||||
return ContiguousSpace::allocate(word_size);
|
||||
}
|
||||
// All allocated blocks are occupied by objects in a HeapRegion
|
||||
bool block_is_obj(const HeapWord* p) const;
|
||||
|
||||
// Returns the object size for all valid block starts
|
||||
// and the amount of unallocated words if called on top()
|
||||
size_t block_size(const HeapWord* p) const;
|
||||
|
||||
inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
|
||||
inline HeapWord* allocate_no_bot_updates(size_t word_size);
|
||||
|
||||
// If this region is a member of a HeapRegionSeq, the index in that
|
||||
// sequence, otherwise -1.
|
||||
@ -569,9 +585,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
|
||||
HeapWord* orig_end() { return _orig_end; }
|
||||
|
||||
// Allows logical separation between objects allocated before and after.
|
||||
void save_marks();
|
||||
|
||||
// Reset HR stuff to default values.
|
||||
void hr_clear(bool par, bool clear_space, bool locked = false);
|
||||
void par_clear();
|
||||
@ -580,10 +593,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
|
||||
HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
|
||||
|
||||
// Apply "cl->do_oop" to (the addresses of) all reference fields in objects
|
||||
// allocated in the current region before the last call to "save_mark".
|
||||
void oop_before_save_marks_iterate(ExtendedOopClosure* cl);
|
||||
|
||||
// Note the start or end of marking. This tells the heap region
|
||||
// that the collector is about to start or has finished (concurrently)
|
||||
// marking the heap.
|
||||
@ -769,10 +778,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
_predicted_bytes_to_copy = bytes;
|
||||
}
|
||||
|
||||
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
|
||||
virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
|
||||
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
|
||||
|
||||
virtual CompactibleSpace* next_compaction_space() const;
|
||||
|
||||
virtual void reset_after_compaction();
|
||||
|
@ -26,9 +26,48 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.hpp"
|
||||
#include "memory/space.hpp"
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
|
||||
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the current top value is returned, i.e. the exchange failed
      //   and we retry with the updated top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
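par_allocate_impl() above is a classic lock-free bump-pointer allocator: read top, compute the new top, and publish it with a compare-and-swap, retrying if another thread won the race. The same loop written against std::atomic instead of HotSpot's Atomic::cmpxchg_ptr, as a rough standalone sketch (ToySpace is invented; memory ordering is simplified relative to the full-fence cmpxchg used in the VM):

#include <atomic>
#include <cstddef>

struct ToySpace {
  std::atomic<char*> top;       // current allocation pointer
  char*              end;       // one past the last usable byte

  // Lock-free bump allocation: succeed only if no other thread moved 'top'
  // between our read and our compare-exchange.
  void* par_allocate(size_t size) {
    char* obj = top.load(std::memory_order_relaxed);
    do {
      if (static_cast<size_t>(end - obj) < size) {
        return nullptr;                        // space exhausted
      }
      // On failure, compare_exchange_weak reloads 'obj' with the current top
      // and the loop retries with the updated value.
    } while (!top.compare_exchange_weak(obj, obj + size,
                                        std::memory_order_relaxed));
    return obj;
  }
};

int main() {
  static char arena[1024];
  ToySpace space;
  space.top.store(arena);
  space.end = arena + sizeof(arena);
  void* a = space.par_allocate(100);
  void* b = space.par_allocate(100);
  return (a != nullptr && b != nullptr && a != b) ? 0 : 1;
}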
|
||||
inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
|
||||
HeapWord* res = ContiguousSpace::allocate(size);
|
||||
HeapWord* res = allocate_impl(size, end());
|
||||
if (res != NULL) {
|
||||
_offsets.alloc_block(res, size);
|
||||
}
|
||||
@ -40,12 +79,7 @@ inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
|
||||
// this is used for larger LAB allocations only.
|
||||
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
|
||||
MutexLocker x(&_par_alloc_lock);
|
||||
// Given that we take the lock no need to use par_allocate() here.
|
||||
HeapWord* res = ContiguousSpace::allocate(size);
|
||||
if (res != NULL) {
|
||||
_offsets.alloc_block(res, size);
|
||||
}
|
||||
return res;
|
||||
return allocate(size);
|
||||
}
|
||||
|
||||
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
|
||||
@ -57,6 +91,41 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
|
||||
return _offsets.block_start_const(p);
|
||||
}
|
||||
|
||||
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  return !g1h->is_obj_dead(oop(p), this);
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  // Old regions' dead objects may have dead classes
  // We need to find the next live object in some other
  // manner than getting the oop size
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (g1h->is_obj_dead(oop(addr), this)) {
    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
                     getNextMarkedWordAddress(addr, prev_top_at_mark_start());

    assert(next > addr, "must get the next live object");

    return pointer_delta(next, addr);
  } else if (addr == top()) {
    return pointer_delta(end(), addr);
  }
  return oop(addr)->size();
}
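block_size() above must return a parsable size even when addr points at a dead object whose class may already be unloaded, so instead of asking the dead object for its size it walks the previous marking bitmap forward to the next live object. A small self-contained analogue over word indices, with an invented ToyRegion and a boolean liveness map standing in for the real CMBitMap:

#include <cstddef>
#include <vector>

// Hypothetical region: live_start[i] is true iff a live object starts at word i.
struct ToyRegion {
  std::vector<bool>   live_start;
  std::vector<size_t> obj_size;     // valid only where live_start[i] is true
  size_t              top;          // first unused word index

  // Size of the parsable block starting at word index 'idx'.
  size_t block_size(size_t idx) const {
    if (idx == top) {
      return live_start.size() - idx;          // unallocated tail of the region
    }
    if (live_start[idx]) {
      return obj_size[idx];                    // a live object: ask it directly
    }
    // Dead object(s): skip forward to the next live start; everything in
    // between is treated as one dead block.
    size_t next = idx + 1;
    while (next < top && !live_start[next]) {
      ++next;
    }
    return next - idx;
  }
};

int main() {
  ToyRegion r;
  r.live_start = {true, false, false, true, false};   // live at 0 and 3, dead gap at 1-2
  r.obj_size   = {1, 0, 0, 2, 0};
  r.top        = 5;
  // The block at index 1 spans the two dead words up to the next live object at 3.
  return r.block_size(1) == 2 ? 0 : 1;
}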
|
||||
|
||||
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
|
||||
assert(is_young(), "we can only skip BOT updates on young regions");
|
||||
return par_allocate_impl(word_size, end());
|
||||
}
|
||||
|
||||
inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
|
||||
assert(is_young(), "we can only skip BOT updates on young regions");
|
||||
return allocate_impl(word_size, end());
|
||||
}
|
||||
|
||||
inline void HeapRegion::note_start_of_marking() {
|
||||
_next_marked_bytes = 0;
|
||||
_next_top_at_mark_start = top();
|
||||
|
@ -931,7 +931,10 @@ void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
|
||||
|
||||
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
|
||||
assert(nm != NULL, "sanity");
|
||||
_code_roots.remove(nm);
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
|
||||
_code_roots.remove_lock_free(nm);
|
||||
|
||||
// Check that there were no duplicates
|
||||
guarantee(!_code_roots.contains(nm), "duplicate entry found");
|
||||
}
|
||||
|
@ -285,37 +285,6 @@ void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
|
||||
_par_closures[i] = par_closure;
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::iterate_closure_all_threads() {
|
||||
for(JavaThread* t = Threads::first(); t; t = t->next()) {
|
||||
t->satb_mark_queue().apply_closure_and_empty(_closure);
|
||||
}
|
||||
shared_satb_queue()->apply_closure_and_empty(_closure);
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
int parity = sh->strong_roots_parity();
|
||||
|
||||
for(JavaThread* t = Threads::first(); t; t = t->next()) {
|
||||
if (t->claim_oops_do(true, parity)) {
|
||||
t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
|
||||
}
|
||||
}
|
||||
|
||||
// We also need to claim the VMThread so that its parity is updated
|
||||
// otherwise the next call to Thread::possibly_parallel_oops_do inside
|
||||
// a StrongRootsScope might skip the VMThread because it has a stale
|
||||
// parity that matches the parity set by the StrongRootsScope
|
||||
//
|
||||
// Whichever worker succeeds in claiming the VMThread gets to do
|
||||
// the shared queue.
|
||||
|
||||
VMThread* vmt = VMThread::vm_thread();
|
||||
if (vmt->claim_oops_do(true, parity)) {
|
||||
shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
|
||||
}
|
||||
}
|
||||
|
||||
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
|
||||
uint worker) {
|
||||
BufferNode* nd = NULL;
|
||||
|
@ -33,7 +33,9 @@ class SATBMarkQueueSet;
|
||||
|
||||
// A ptrQueue whose elements are "oops", pointers to object heads.
|
||||
class ObjPtrQueue: public PtrQueue {
|
||||
friend class Threads;
|
||||
friend class SATBMarkQueueSet;
|
||||
friend class G1RemarkThreadsClosure;
|
||||
|
||||
private:
|
||||
// Filter out unwanted entries from the buffer.
|
||||
@ -119,13 +121,6 @@ public:
|
||||
// closures, one for each parallel GC thread.
|
||||
void set_par_closure(int i, ObjectClosure* closure);
|
||||
|
||||
// Apply the registered closure to all entries on each
|
||||
// currently-active buffer and then empty the buffer. It should only
|
||||
// be called serially and at a safepoint.
|
||||
void iterate_closure_all_threads();
|
||||
// Parallel version of the above.
|
||||
void par_iterate_closure_all_threads(uint worker);
|
||||
|
||||
// If there exists some completed buffer, pop it, then apply the
|
||||
// registered closure to all its elements, and return true. If no
|
||||
// completed buffers exist, return false.
|
||||
|
@ -34,6 +34,8 @@
|
||||
static_field(HeapRegion, GrainBytes, size_t) \
|
||||
static_field(HeapRegion, LogOfHRGrainBytes, int) \
|
||||
\
|
||||
nonstatic_field(G1OffsetTableContigSpace, _top, HeapWord*) \
|
||||
\
|
||||
nonstatic_field(G1HeapRegionTable, _base, address) \
|
||||
nonstatic_field(G1HeapRegionTable, _length, size_t) \
|
||||
nonstatic_field(G1HeapRegionTable, _biased_base, address) \
|
||||
@ -69,7 +71,8 @@
|
||||
\
|
||||
declare_type(G1CollectedHeap, SharedHeap) \
|
||||
\
|
||||
declare_type(HeapRegion, ContiguousSpace) \
|
||||
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
|
||||
declare_type(HeapRegion, G1OffsetTableContigSpace) \
|
||||
declare_toplevel_type(HeapRegionSeq) \
|
||||
declare_toplevel_type(HeapRegionSetBase) \
|
||||
declare_toplevel_type(HeapRegionSetCount) \
|
||||
|
@ -1,657 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
|
||||
#include "gc_implementation/parNew/asParNewGeneration.hpp"
|
||||
#include "gc_implementation/parNew/parNewGeneration.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_implementation/shared/spaceDecorator.hpp"
|
||||
#include "memory/defNewGeneration.inline.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "oops/markOop.inline.hpp"
|
||||
#include "oops/oop.pcgc.inline.hpp"
|
||||
|
||||
ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,
|
||||
size_t initial_byte_size,
|
||||
size_t min_byte_size,
|
||||
int level) :
|
||||
ParNewGeneration(rs, initial_byte_size, level),
|
||||
_min_gen_size(min_byte_size) {}
|
||||
|
||||
const char* ASParNewGeneration::name() const {
|
||||
return "adaptive size par new generation";
|
||||
}
|
||||
|
||||
void ASParNewGeneration::adjust_desired_tenuring_threshold() {
|
||||
assert(UseAdaptiveSizePolicy,
|
||||
"Should only be used with UseAdaptiveSizePolicy");
|
||||
}
|
||||
|
||||
void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
|
||||
// Resize the generation if needed. If the generation resize
|
||||
// reports false, do not attempt to resize the spaces.
|
||||
if (resize_generation(eden_size, survivor_size)) {
|
||||
// Then we lay out the spaces inside the generation
|
||||
resize_spaces(eden_size, survivor_size);
|
||||
|
||||
space_invariants();
|
||||
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr("Young generation size: "
|
||||
"desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
|
||||
" used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
|
||||
" gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
|
||||
eden_size, survivor_size, used(), capacity(),
|
||||
max_gen_size(), min_gen_size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
size_t ASParNewGeneration::available_to_min_gen() {
|
||||
assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
|
||||
return virtual_space()->committed_size() - min_gen_size();
|
||||
}
|
||||
|
||||
// This method assumes that from-space has live data and that
|
||||
// any shrinkage of the young gen is limited by location of
|
||||
// from-space.
|
||||
size_t ASParNewGeneration::available_to_live() const {
|
||||
#undef SHRINKS_AT_END_OF_EDEN
|
||||
#ifdef SHRINKS_AT_END_OF_EDEN
|
||||
size_t delta_in_survivor = 0;
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
const size_t space_alignment = heap->intra_heap_alignment();
|
||||
const size_t gen_alignment = heap->object_heap_alignment();
|
||||
|
||||
MutableSpace* space_shrinking = NULL;
|
||||
if (from_space()->end() > to_space()->end()) {
|
||||
space_shrinking = from_space();
|
||||
} else {
|
||||
space_shrinking = to_space();
|
||||
}
|
||||
|
||||
// Include any space that is committed but not included in
|
||||
// the survivor spaces.
|
||||
assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
|
||||
"Survivor space beyond high end");
|
||||
size_t unused_committed = pointer_delta(virtual_space()->high(),
|
||||
space_shrinking->end(), sizeof(char));
|
||||
|
||||
if (space_shrinking->is_empty()) {
|
||||
// Don't let the space shrink to 0
|
||||
assert(space_shrinking->capacity_in_bytes() >= space_alignment,
|
||||
"Space is too small");
|
||||
delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
|
||||
} else {
|
||||
delta_in_survivor = pointer_delta(space_shrinking->end(),
|
||||
space_shrinking->top(),
|
||||
sizeof(char));
|
||||
}
|
||||
|
||||
size_t delta_in_bytes = unused_committed + delta_in_survivor;
|
||||
delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
|
||||
return delta_in_bytes;
|
||||
#else
|
||||
// The only space available for shrinking is in to-space if it
|
||||
// is above from-space.
|
||||
if (to()->bottom() > from()->bottom()) {
|
||||
const size_t alignment = os::vm_page_size();
|
||||
if (to()->capacity() < alignment) {
|
||||
return 0;
|
||||
} else {
|
||||
return to()->capacity() - alignment;
|
||||
}
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// Return the number of bytes available for resizing down the young
|
||||
// generation. This is the minimum of
|
||||
// input "bytes"
|
||||
// bytes to the minimum young gen size
|
||||
// bytes to the size currently being used + some small extra
|
||||
size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
|
||||
// Allow shrinkage into the current eden but keep eden large enough
|
||||
// to maintain the minimum young gen size
|
||||
bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
|
||||
return align_size_down(bytes, os::vm_page_size());
|
||||
}
|
||||
|
||||
// Note that the the alignment used is the OS page size as
|
||||
// opposed to an alignment associated with the virtual space
|
||||
// (as is done in the ASPSYoungGen/ASPSOldGen)
|
||||
bool ASParNewGeneration::resize_generation(size_t eden_size,
|
||||
size_t survivor_size) {
|
||||
const size_t alignment = os::vm_page_size();
|
||||
size_t orig_size = virtual_space()->committed_size();
|
||||
bool size_changed = false;
|
||||
|
||||
// There used to be this guarantee there.
|
||||
// guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
|
||||
// Code below forces this requirement. In addition the desired eden
|
||||
// size and desired survivor sizes are desired goals and may
|
||||
// exceed the total generation size.
|
||||
|
||||
assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
|
||||
"just checking");
|
||||
|
||||
// Adjust new generation size
|
||||
const size_t eden_plus_survivors =
|
||||
align_size_up(eden_size + 2 * survivor_size, alignment);
|
||||
size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),
|
||||
min_gen_size());
|
||||
assert(desired_size <= max_gen_size(), "just checking");
|
||||
|
||||
if (desired_size > orig_size) {
|
||||
// Grow the generation
|
||||
size_t change = desired_size - orig_size;
|
||||
assert(change % alignment == 0, "just checking");
|
||||
if (expand(change)) {
|
||||
return false; // Error if we fail to resize!
|
||||
}
|
||||
size_changed = true;
|
||||
} else if (desired_size < orig_size) {
|
||||
size_t desired_change = orig_size - desired_size;
|
||||
assert(desired_change % alignment == 0, "just checking");
|
||||
|
||||
desired_change = limit_gen_shrink(desired_change);
|
||||
|
||||
if (desired_change > 0) {
|
||||
virtual_space()->shrink_by(desired_change);
|
||||
reset_survivors_after_shrink();
|
||||
|
||||
size_changed = true;
|
||||
}
|
||||
} else {
|
||||
if (Verbose && PrintGC) {
|
||||
if (orig_size == max_gen_size()) {
|
||||
gclog_or_tty->print_cr("ASParNew generation size at maximum: "
|
||||
SIZE_FORMAT "K", orig_size/K);
|
||||
} else if (orig_size == min_gen_size()) {
|
||||
gclog_or_tty->print_cr("ASParNew generation size at minium: "
|
||||
SIZE_FORMAT "K", orig_size/K);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (size_changed) {
|
||||
MemRegion cmr((HeapWord*)virtual_space()->low(),
|
||||
(HeapWord*)virtual_space()->high());
|
||||
GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);
|
||||
|
||||
if (Verbose && PrintGC) {
|
||||
size_t current_size = virtual_space()->committed_size();
|
||||
gclog_or_tty->print_cr("ASParNew generation size changed: "
|
||||
SIZE_FORMAT "K->" SIZE_FORMAT "K",
|
||||
orig_size/K, current_size/K);
|
||||
}
|
||||
}
|
||||
|
||||
guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
|
||||
virtual_space()->committed_size() == max_gen_size(), "Sanity");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ASParNewGeneration::reset_survivors_after_shrink() {
|
||||
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
HeapWord* new_end = (HeapWord*)virtual_space()->high();
|
||||
|
||||
if (from()->end() > to()->end()) {
|
||||
assert(new_end >= from()->end(), "Shrinking past from-space");
|
||||
} else {
|
||||
assert(new_end >= to()->bottom(), "Shrink was too large");
|
||||
// Was there a shrink of the survivor space?
|
||||
if (new_end < to()->end()) {
|
||||
MemRegion mr(to()->bottom(), new_end);
|
||||
to()->initialize(mr,
|
||||
SpaceDecorator::DontClear,
|
||||
SpaceDecorator::DontMangle);
|
||||
}
|
||||
}
|
||||
}
|
||||
void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
|
||||
size_t requested_survivor_size) {
|
||||
assert(UseAdaptiveSizePolicy, "sanity check");
|
||||
assert(requested_eden_size > 0 && requested_survivor_size > 0,
|
||||
"just checking");
|
||||
CollectedHeap* heap = Universe::heap();
|
||||
assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
|
||||
|
||||
|
||||
// We require eden and to space to be empty
|
||||
if ((!eden()->is_empty()) || (!to()->is_empty())) {
|
||||
return;
|
||||
}
|
||||
|
||||
size_t cur_eden_size = eden()->capacity();
|
||||
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
|
||||
SIZE_FORMAT
|
||||
", requested_survivor_size: " SIZE_FORMAT ")",
|
||||
requested_eden_size, requested_survivor_size);
|
||||
gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(eden()->bottom()),
|
||||
p2i(eden()->end()),
|
||||
pointer_delta(eden()->end(),
|
||||
eden()->bottom(),
|
||||
sizeof(char)));
|
||||
gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(from()->bottom()),
|
||||
p2i(from()->end()),
|
||||
pointer_delta(from()->end(),
|
||||
from()->bottom(),
|
||||
sizeof(char)));
|
||||
gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
|
||||
SIZE_FORMAT,
|
||||
p2i(to()->bottom()),
|
||||
p2i(to()->end()),
|
||||
pointer_delta( to()->end(),
|
||||
to()->bottom(),
|
||||
sizeof(char)));
|
||||
}
|
||||
|
||||
// There's nothing to do if the new sizes are the same as the current
|
||||
if (requested_survivor_size == to()->capacity() &&
|
||||
requested_survivor_size == from()->capacity() &&
|
||||
requested_eden_size == eden()->capacity()) {
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr(" capacities are the right sizes, returning");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
char* eden_start = (char*)eden()->bottom();
|
||||
char* eden_end = (char*)eden()->end();
|
||||
char* from_start = (char*)from()->bottom();
|
||||
char* from_end = (char*)from()->end();
|
||||
char* to_start = (char*)to()->bottom();
|
||||
char* to_end = (char*)to()->end();
|
||||
|
||||
const size_t alignment = os::vm_page_size();
|
||||
const bool maintain_minimum =
|
||||
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
|
||||
|
||||
// Check whether from space is below to space
|
||||
  if (from_start < to_start) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_size = align_size_down(eden_size, alignment);
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    } else {
      // If shrinking, move to-space down to abut the end of from-space
      // so that shrinking will move to-space down.  If not shrinking,
      // to-space is moving up to allow for growth on the next expansion.
      if (requested_eden_size <= cur_eden_size) {
        to_start = from_end;
        if (to_start + requested_survivor_size > to_start) {
          to_end = to_start + requested_survivor_size;
        }
      }
      // else leave to_end pointing to the high end of the virtual space.
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(eden_start),
                             p2i(eden_end),
                             pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(from_start),
                             p2i(from_end),
                             pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(to_start),
                             p2i(to_end),
                             pointer_delta( to_end, to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, to, from:");
    }

    // Calculate the to-space boundaries based on
    // the start of from-space.
    to_end = from_start;
    to_start = (char*)pointer_delta(from_start,
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    // Calculate the ideal eden boundaries.
    // eden_end is already at the bottom of the generation
    assert(eden_start == virtual_space()->low(),
           "Eden is not starting at the low end of the virtual space");
    if (eden_start + requested_eden_size >= eden_start) {
      eden_end = eden_start + requested_eden_size;
    } else {
      eden_end = to_start;
    }

    // Does eden intrude into to-space?  to-space
    // gets priority but eden is not allowed to shrink
    // to 0.
    if (eden_end > to_start) {
      eden_end = to_start;
    }

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    assert(eden_start + alignment >= eden_start, "Overflow");

    size_t eden_size;
    if (maintain_minimum) {
      // Use all the space available.
      eden_end = MAX2(eden_end, to_start);
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
      eden_size = MIN2(eden_size, cur_eden_size);
    } else {
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
    }
    eden_size = align_size_down(eden_size, alignment);
    assert(maintain_minimum || eden_size <= requested_eden_size,
           "Eden size is too large");
    assert(eden_size >= alignment, "Eden size is too small");
    eden_end = eden_start + eden_size;

    // Move to-space down to eden.
    if (requested_eden_size < cur_eden_size) {
      to_start = eden_end;
      if (to_start + requested_survivor_size > to_start) {
        to_end = MIN2(from_start, to_start + requested_survivor_size);
      } else {
        to_end = from_start;
      }
    }

    // eden_end may have moved so again make sure
    // the to-space and eden don't overlap.
    to_start = MAX2(eden_end, to_start);

    // from-space
    size_t from_used = from()->used();
    if (requested_survivor_size > from_used) {
      if (from_start + requested_survivor_size >= from_start) {
        from_end = from_start + requested_survivor_size;
      }
      if (from_end > virtual_space()->high()) {
        from_end = virtual_space()->high();
      }
    }

    assert(to_start >= eden_end, "to-space should be above eden");
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(eden_start),
                             p2i(eden_end),
                             pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(to_start),
                             p2i(to_end),
                             pointer_delta( to_end, to_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
                             "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                             p2i(from_start),
                             p2i(from_end),
                             pointer_delta(from_end, from_start, sizeof(char)));
    }
  }

  guarantee((HeapWord*)from_start <= from()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from()->top();

  // For PrintAdaptiveSizePolicy block below
  size_t old_from = from()->capacity();
  size_t old_to = to()->capacity();

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.

  // Must check mangling before the spaces are reshaped.  Otherwise,
  // the bottom or end of one space may have moved into another space, and
  // a failure of the check may not correctly indicate which space
  // is not properly mangled.
  if (ZapUnusedHeapArea) {
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden()->check_mangled_unused_area(limit);
    from()->check_mangled_unused_area(limit);
    to()->check_mangled_unused_area(limit);
  }

  // The call to initialize NULL's the next compaction space
  eden()->initialize(edenMR,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  eden()->set_next_compaction_space(from());
  to()->initialize(toMR,
                   SpaceDecorator::Clear,
                   SpaceDecorator::DontMangle);
  from()->initialize(fromMR,
                     SpaceDecorator::DontClear,
                     SpaceDecorator::DontMangle);

  assert(from()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                        "collection: %d "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                        gch->total_collections(),
                        old_from, old_to,
                        from()->capacity(),
                        to()->capacity());
    gclog_or_tty->cr();
  }
}

void ASParNewGeneration::compute_new_size() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");

  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");

  size_t survived = from()->used();
  if (!survivor_overflow()) {
    // Keep running averages on how much survived
    size_policy->avg_survived()->sample(survived);
  } else {
    size_t promoted =
      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
    assert(promoted < gch->capacity(), "Conversion problem?");
    size_t survived_guess = survived + promoted;
    size_policy->avg_survived()->sample(survived_guess);
  }

  size_t survivor_limit = max_survivor_size();
  _tenuring_threshold =
    size_policy->compute_survivor_space_size_and_threshold(
        _survivor_overflow,
        _tenuring_threshold,
        survivor_limit);
  size_policy->avg_young_live()->sample(used());
  size_policy->avg_eden_live()->sample(eden()->used());

  size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());

  resize(size_policy->calculated_eden_size_in_bytes(),
         size_policy->calculated_survivor_size_in_bytes());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters =
      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
    assert(counters->kind() ==
           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
           "Wrong kind of counters");
    counters->update_tenuring_threshold(_tenuring_threshold);
    counters->update_survivor_overflowed(_survivor_overflow);
    counters->update_young_capacity(capacity());
  }
}

#ifndef PRODUCT
// Changes from PSYoungGen version
//  value of "alignment"
void ASParNewGeneration::space_invariants() {
  const size_t alignment = os::vm_page_size();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden()->capacity() >= alignment, "eden too small");
  guarantee(from()->capacity() >= alignment, "from too small");
  guarantee(to()->capacity() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden()->bottom();
  char* eden_end = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end = (char*)from()->end();
  char* to_start = (char*)to()->bottom();
  char* to_end = (char*)to()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start, "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
         (eden()->capacity() +
          to()->capacity() +
          from()->capacity()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
  char* eden_top = (char*)eden()->top();
  char* from_top = (char*)from()->top();
  char* to_top = (char*)to()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top <= virtual_space()->high(), "to top");
}
#endif
@ -1,98 +0,0 @@
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP

#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"

// A Generation that does parallel young-gen collection extended
// for adaptive size policy.

// Division of generation into spaces
// done by DefNewGeneration::compute_space_boundaries()
//      +---------------+
//      | uncommitted   |
//      |---------------|
//      | ss0           |
//      |---------------|
//      | ss1           |
//      |---------------|
//      |               |
//      |     eden      |
//      |               |
//      +---------------+   <-- low end of VirtualSpace
//
class ASParNewGeneration: public ParNewGeneration {

  size_t _min_gen_size;

  // Resize the generation based on the desired sizes of
  // the constituent spaces.
  bool resize_generation(size_t eden_size, size_t survivor_size);
  // Resize the spaces based on their desired sizes but
  // respecting the maximum size of the generation.
  void resize_spaces(size_t eden_size, size_t survivor_size);
  // Return the byte size remaining to the minimum generation size.
  size_t available_to_min_gen();
  // Return the byte size remaining to the live data in the generation.
  size_t available_to_live() const;
  // Return the byte size that the generation is allowed to shrink.
  size_t limit_gen_shrink(size_t bytes);
  // Reset the size of the spaces after a shrink of the generation.
  void reset_survivors_after_shrink();

  // Accessor
  VirtualSpace* virtual_space() { return &_virtual_space; }

  virtual void adjust_desired_tenuring_threshold();

 public:

  ASParNewGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
                     size_t min_byte_size,
                     int level);

  virtual const char* short_name() const { return "ASParNew"; }
  virtual const char* name() const;
  virtual Generation::Name kind() { return ASParNew; }

  // Change the sizes of eden and the survivor spaces in
  // the generation.  The parameters are desired sizes
  // and are not guaranteed to be met.  For example, they
  // may not be met if their total is larger than the generation.
  void resize(size_t eden_size, size_t survivor_size);

  virtual void compute_new_size();

  size_t max_gen_size() { return _reserved.byte_size(); }
  size_t min_gen_size() const { return _min_gen_size; }

  // Space boundary invariant checker
  void space_invariants() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP