J. Duke 2017-07-05 18:59:30 +02:00
commit 364b77d04c
220 changed files with 8409 additions and 1581 deletions

View File

@@ -215,3 +215,4 @@ e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
 cb51fb4789ac0b8be4056482077ddfb8f3bd3805 jdk8-b91
 3a36c926a7aafa9d4a892a45ef3678e87ad8359b jdk8-b92
 27c51c6e31c1ef36afa0e6efb031f9b13f26c12b jdk8-b93
+50d2bde060f2a9bbbe4da0c8986e20aca61f2e2e jdk8-b94

View File

@@ -363,7 +363,11 @@ AC_ARG_WITH([devkit], [AS_HELP_STRING([--with-devkit],
        AC_MSG_ERROR([Cannot specify both --with-devkit and --with-tools-dir at the same time])
      fi
      TOOLS_DIR=$with_devkit/bin
-     SYS_ROOT=$with_devkit/$host_alias/libc
+     if test -d "$with_devkit/$host_alias/libc"; then
+       SYS_ROOT=$with_devkit/$host_alias/libc
+     elif test -d "$with_devkit/$host/sys-root"; then
+       SYS_ROOT=$with_devkit/$host/sys-root
+     fi
  ])
])
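The new probe lets --with-devkit accept either devkit layout. A minimal sketch of the two directory shapes it distinguishes, with a hypothetical devkit root and triplet (configure is invoked the same way in both cases):

    # older devkits stage glibc under <devkit>/<triplet>/libc
    ls /opt/devkit/x86_64-unknown-linux-gnu/libc/usr/include
    # devkits produced by the new makefiles below use <devkit>/<triplet>/sys-root
    ls /opt/devkit/x86_64-unknown-linux-gnu/sys-root/usr/include
    # either way:
    bash configure --with-devkit=/opt/devkit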

View File

@@ -162,7 +162,12 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
        [disable using ccache to speed up recompilations @<:@enabled@:>@])],
        [ENABLE_CCACHE=${enable_ccache}], [ENABLE_CCACHE=yes])
    if test "x$ENABLE_CCACHE" = xyes; then
+       OLD_PATH="$PATH"
+       if test "x$TOOLS_DIR" != x; then
+           PATH=$TOOLS_DIR:$PATH
+       fi
        AC_PATH_PROG(CCACHE, ccache)
+       PATH="$OLD_PATH"
    else
        AC_MSG_CHECKING([for ccache])
        AC_MSG_RESULT([explicitly disabled])
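The hunk brackets the ccache lookup with a save/prepend/restore of PATH so that a ccache shipped in the devkit's TOOLS_DIR wins over whatever is installed on the build machine. The same pattern in plain shell, with an assumed devkit path:

    OLD_PATH="$PATH"
    PATH=/opt/devkit/bin:$PATH      # hypothetical TOOLS_DIR
    CCACHE=$(command -v ccache)     # roughly what AC_PATH_PROG(CCACHE, ccache) does
    PATH="$OLD_PATH"                # restore so later checks are unaffected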

View File

@@ -3782,7 +3782,7 @@ fi
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1370949244
+DATE_WHEN_GENERATED=1371547824

###############################################################################
# #
@@ -7471,7 +7471,11 @@ if test "${with_devkit+set}" = set; then :
        as_fn_error $? "Cannot specify both --with-devkit and --with-tools-dir at the same time" "$LINENO" 5
    fi
    TOOLS_DIR=$with_devkit/bin
-   SYS_ROOT=$with_devkit/$host_alias/libc
+   if test -d "$with_devkit/$host_alias/libc"; then
+     SYS_ROOT=$with_devkit/$host_alias/libc
+   elif test -d "$with_devkit/$host/sys-root"; then
+     SYS_ROOT=$with_devkit/$host/sys-root
+   fi
fi
@@ -29144,7 +29148,6 @@ CXX_FLAG_DEPS="-MMD -MF"
case $COMPILER_TYPE in
  CC )
-    D_FLAG="-g"
    case $COMPILER_NAME in
      gcc )
        case $OPENJDK_TARGET_OS in
@@ -29159,17 +29162,17 @@ case $COMPILER_TYPE in
            C_O_FLAG_HI="-O3"
            C_O_FLAG_NORM="-O2"
            C_O_FLAG_NONE="-O0"
+           CFLAGS_DEBUG_SYMBOLS="-g"
+           CXXFLAGS_DEBUG_SYMBOLS="-g"
+           if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
+             CFLAGS_DEBUG_SYMBOLS="-g1"
+             CXXFLAGS_DEBUG_SYMBOLS="-g1"
+           fi
            ;;
        esac
        CXX_O_FLAG_HI="$C_O_FLAG_HI"
        CXX_O_FLAG_NORM="$C_O_FLAG_NORM"
        CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
-       CFLAGS_DEBUG_SYMBOLS="-g"
-       CXXFLAGS_DEBUG_SYMBOLS="-g"
-       if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
-         CFLAGS_DEBUG_SYMBOLS="-g1"
-         CXXFLAGS_DEBUG_SYMBOLS="-g1"
-       fi
        ;;
    ossc )
        #
@@ -29250,7 +29253,6 @@ case $COMPILER_TYPE in
    esac
    ;;
  CL )
-    D_FLAG=
    C_O_FLAG_HIGHEST="-O2"
    C_O_FLAG_HI="-O1"
    C_O_FLAG_NORM="-O1"
@@ -29389,6 +29391,28 @@ esac
###############################################################################

+# Adjust flags according to debug level.
+case $DEBUG_LEVEL in
+  fastdebug )
+    CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
+    CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
+    C_O_FLAG_HI="$C_O_FLAG_NORM"
+    C_O_FLAG_NORM="$C_O_FLAG_NORM"
+    CXX_O_FLAG_HI="$CXX_O_FLAG_NORM"
+    CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM"
+    JAVAC_FLAGS="$JAVAC_FLAGS -g"
+    ;;
+  slowdebug )
+    CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
+    CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
+    C_O_FLAG_HI="$C_O_FLAG_NONE"
+    C_O_FLAG_NORM="$C_O_FLAG_NONE"
+    CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
+    CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
+    JAVAC_FLAGS="$JAVAC_FLAGS -g"
+    ;;
+esac
+
CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64"

# The package path is used only on macosx?
@@ -29532,23 +29556,6 @@ else
    fi
fi

-# Adjust flags according to debug level.
-case $DEBUG_LEVEL in
-  fastdebug )
-    CFLAGS="$CFLAGS $D_FLAG"
-    JAVAC_FLAGS="$JAVAC_FLAGS -g"
-    ;;
-  slowdebug )
-    CFLAGS="$CFLAGS $D_FLAG"
-    C_O_FLAG_HI="$C_O_FLAG_NONE"
-    C_O_FLAG_NORM="$C_O_FLAG_NONE"
-    CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
-    CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
-    JAVAC_FLAGS="$JAVAC_FLAGS -g"
-    ;;
-esac
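Together, these two hunks retire the old D_FLAG plumbing: debug symbols now flow through CFLAGS_DEBUG_SYMBOLS/CXXFLAGS_DEBUG_SYMBOLS into the JDK flags, and the optimization tiers are demoted per debug level. For illustration only, the values a 64-bit gcc fastdebug run would end up with (a sketch, not actual configure output):

    DEBUG_LEVEL=fastdebug
    CFLAGS_DEBUG_SYMBOLS="-g1"        # -g1 because CPU_BITS=64 and fastdebug
    C_O_FLAG_HI="-O2"                 # HI demoted to the NORM level
    CFLAGS_JDK="... -g1"              # debug symbols reach the JDK C flags
    JAVAC_FLAGS="... -g"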
@@ -29907,11 +29914,17 @@ if test "x$SYS_ROOT" != "x/"; then
    if test "x$x_includes" = xNONE; then
        if test -f "$SYS_ROOT/usr/X11R6/include/X11/Xlib.h"; then
            x_includes="$SYS_ROOT/usr/X11R6/include"
+       elif test -f "$SYS_ROOT/usr/include/X11/Xlib.h"; then
+           x_includes="$SYS_ROOT/usr/include"
        fi
    fi
    if test "x$x_libraries" = xNONE; then
        if test -f "$SYS_ROOT/usr/X11R6/lib/libX11.so"; then
            x_libraries="$SYS_ROOT/usr/X11R6/lib"
+       elif test "$SYS_ROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+           x_libraries="$SYS_ROOT/usr/lib64"
+       elif test -f "$SYS_ROOT/usr/lib/libX11.so"; then
+           x_libraries="$SYS_ROOT/usr/lib"
        fi
    fi
fi
@@ -30642,8 +30655,7 @@ fi
if test "x$OPENJDK_TARGET_OS" = xlinux; then
    if test -d "$SYS_ROOT/usr/X11R6"; then
        OPENWIN_HOME="$SYS_ROOT/usr/X11R6"
-   fi
-   if test -d "$SYS_ROOT/usr/include/X11"; then
+   elif test -d "$SYS_ROOT/usr/include/X11"; then
        OPENWIN_HOME="$SYS_ROOT/usr"
    fi
fi
@@ -31536,12 +31548,12 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for freetype in some standard locations" >&5
$as_echo_n "checking for freetype in some standard locations... " >&6; }

-   if test -s /usr/X11/include/ft2build.h && test -d /usr/X11/include/freetype2/freetype; then
-       DEFAULT_FREETYPE_CFLAGS="-I/usr/X11/include/freetype2 -I/usr/X11/include"
-       DEFAULT_FREETYPE_LIBS="-L/usr/X11/lib -lfreetype"
+   if test -s $SYS_ROOT/usr/X11/include/ft2build.h && test -d $SYS_ROOT/usr/X11/include/freetype2/freetype; then
+       DEFAULT_FREETYPE_CFLAGS="-I$SYS_ROOT/usr/X11/include/freetype2 -I$SYS_ROOT/usr/X11/include"
+       DEFAULT_FREETYPE_LIBS="-L$SYS_ROOT/usr/X11/lib -lfreetype"
    fi
-   if test -s /usr/include/ft2build.h && test -d /usr/include/freetype2/freetype; then
-       DEFAULT_FREETYPE_CFLAGS="-I/usr/include/freetype2"
+   if test -s $SYS_ROOT/usr/include/ft2build.h && test -d $SYS_ROOT/usr/include/freetype2/freetype; then
+       DEFAULT_FREETYPE_CFLAGS="-I$SYS_ROOT/usr/include/freetype2"
        DEFAULT_FREETYPE_LIBS="-lfreetype"
    fi
@@ -33217,6 +33229,10 @@ else
fi

if test "x$ENABLE_CCACHE" = xyes; then
+   OLD_PATH="$PATH"
+   if test "x$TOOLS_DIR" != x; then
+       PATH=$TOOLS_DIR:$PATH
+   fi
    # Extract the first word of "ccache", so it can be a program name with args.
    set dummy ccache; ac_word=$2
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -33257,6 +33273,7 @@ $as_echo "no" >&6; }
fi

+   PATH="$OLD_PATH"
else
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ccache" >&5
$as_echo_n "checking for ccache... " >&6; }

View File

@@ -123,11 +123,17 @@ if test "x$SYS_ROOT" != "x/"; then
    if test "x$x_includes" = xNONE; then
        if test -f "$SYS_ROOT/usr/X11R6/include/X11/Xlib.h"; then
            x_includes="$SYS_ROOT/usr/X11R6/include"
+       elif test -f "$SYS_ROOT/usr/include/X11/Xlib.h"; then
+           x_includes="$SYS_ROOT/usr/include"
        fi
    fi
    if test "x$x_libraries" = xNONE; then
        if test -f "$SYS_ROOT/usr/X11R6/lib/libX11.so"; then
            x_libraries="$SYS_ROOT/usr/X11R6/lib"
+       elif test "$SYS_ROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+           x_libraries="$SYS_ROOT/usr/lib64"
+       elif test -f "$SYS_ROOT/usr/lib/libX11.so"; then
+           x_libraries="$SYS_ROOT/usr/lib"
        fi
    fi
fi
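The X11 lookup now falls back from the X11R6 layout to plain /usr paths inside the sysroot. The library probe order, sketched in plain shell with a hypothetical SYS_ROOT (the real check additionally gates usr/lib64 on a 64-bit target):

    SYS_ROOT=/opt/devkit/x86_64-unknown-linux-gnu/sys-root
    for dir in usr/X11R6/lib usr/lib64 usr/lib; do
      if [ -f "$SYS_ROOT/$dir/libX11.so" ]; then
        x_libraries="$SYS_ROOT/$dir"
        break
      fi
    done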
@@ -153,8 +159,7 @@ fi
if test "x$OPENJDK_TARGET_OS" = xlinux; then
    if test -d "$SYS_ROOT/usr/X11R6"; then
        OPENWIN_HOME="$SYS_ROOT/usr/X11R6"
-   fi
-   if test -d "$SYS_ROOT/usr/include/X11"; then
+   elif test -d "$SYS_ROOT/usr/include/X11"; then
        OPENWIN_HOME="$SYS_ROOT/usr"
    fi
fi
@@ -359,12 +364,12 @@ else
if test "x$FREETYPE2_FOUND" = xno; then
    AC_MSG_CHECKING([for freetype in some standard locations])

-   if test -s /usr/X11/include/ft2build.h && test -d /usr/X11/include/freetype2/freetype; then
-       DEFAULT_FREETYPE_CFLAGS="-I/usr/X11/include/freetype2 -I/usr/X11/include"
-       DEFAULT_FREETYPE_LIBS="-L/usr/X11/lib -lfreetype"
+   if test -s $SYS_ROOT/usr/X11/include/ft2build.h && test -d $SYS_ROOT/usr/X11/include/freetype2/freetype; then
+       DEFAULT_FREETYPE_CFLAGS="-I$SYS_ROOT/usr/X11/include/freetype2 -I$SYS_ROOT/usr/X11/include"
+       DEFAULT_FREETYPE_LIBS="-L$SYS_ROOT/usr/X11/lib -lfreetype"
    fi
-   if test -s /usr/include/ft2build.h && test -d /usr/include/freetype2/freetype; then
-       DEFAULT_FREETYPE_CFLAGS="-I/usr/include/freetype2"
+   if test -s $SYS_ROOT/usr/include/ft2build.h && test -d $SYS_ROOT/usr/include/freetype2/freetype; then
+       DEFAULT_FREETYPE_CFLAGS="-I$SYS_ROOT/usr/include/freetype2"
        DEFAULT_FREETYPE_LIBS="-lfreetype"
    fi
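Prefixing the probes with $SYS_ROOT keeps a sysroot-ed build from silently picking up the build machine's freetype. With an assumed sysroot, the second branch would yield:

    SYS_ROOT=/opt/devkit/x86_64-unknown-linux-gnu/sys-root   # hypothetical
    DEFAULT_FREETYPE_CFLAGS="-I$SYS_ROOT/usr/include/freetype2"
    DEFAULT_FREETYPE_LIBS="-lfreetype"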

View File

@@ -275,6 +275,8 @@ FREETYPE2_LIBS:=@FREETYPE2_LIBS@
FREETYPE2_CFLAGS:=@FREETYPE2_CFLAGS@
USING_SYSTEM_FT_LIB=@USING_SYSTEM_FT_LIB@
CUPS_CFLAGS:=@CUPS_CFLAGS@
+ALSA_LIBS:=@ALSA_LIBS@
+ALSA_CFLAGS:=@ALSA_CFLAGS@
PACKAGE_PATH=@PACKAGE_PATH@

View File

@@ -629,7 +629,6 @@ CXX_FLAG_DEPS="-MMD -MF"
case $COMPILER_TYPE in
  CC )
-    D_FLAG="-g"
    case $COMPILER_NAME in
      gcc )
        case $OPENJDK_TARGET_OS in
@@ -644,17 +643,17 @@ case $COMPILER_TYPE in
            C_O_FLAG_HI="-O3"
            C_O_FLAG_NORM="-O2"
            C_O_FLAG_NONE="-O0"
+           CFLAGS_DEBUG_SYMBOLS="-g"
+           CXXFLAGS_DEBUG_SYMBOLS="-g"
+           if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
+             CFLAGS_DEBUG_SYMBOLS="-g1"
+             CXXFLAGS_DEBUG_SYMBOLS="-g1"
+           fi
            ;;
        esac
        CXX_O_FLAG_HI="$C_O_FLAG_HI"
        CXX_O_FLAG_NORM="$C_O_FLAG_NORM"
        CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
-       CFLAGS_DEBUG_SYMBOLS="-g"
-       CXXFLAGS_DEBUG_SYMBOLS="-g"
-       if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
-         CFLAGS_DEBUG_SYMBOLS="-g1"
-         CXXFLAGS_DEBUG_SYMBOLS="-g1"
-       fi
        ;;
    ossc )
        #
@@ -735,7 +734,6 @@ case $COMPILER_TYPE in
    esac
    ;;
  CL )
-    D_FLAG=
    C_O_FLAG_HIGHEST="-O2"
    C_O_FLAG_HI="-O1"
    C_O_FLAG_NORM="-O1"
@@ -861,6 +859,28 @@ esac
###############################################################################

+# Adjust flags according to debug level.
+case $DEBUG_LEVEL in
+  fastdebug )
+    CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
+    CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
+    C_O_FLAG_HI="$C_O_FLAG_NORM"
+    C_O_FLAG_NORM="$C_O_FLAG_NORM"
+    CXX_O_FLAG_HI="$CXX_O_FLAG_NORM"
+    CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM"
+    JAVAC_FLAGS="$JAVAC_FLAGS -g"
+    ;;
+  slowdebug )
+    CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
+    CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
+    C_O_FLAG_HI="$C_O_FLAG_NONE"
+    C_O_FLAG_NORM="$C_O_FLAG_NONE"
+    CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
+    CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
+    JAVAC_FLAGS="$JAVAC_FLAGS -g"
+    ;;
+esac
+
CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64"

# The package path is used only on macosx?
@@ -1004,23 +1024,6 @@ else
    fi
fi

-# Adjust flags according to debug level.
-case $DEBUG_LEVEL in
-  fastdebug )
-    CFLAGS="$CFLAGS $D_FLAG"
-    JAVAC_FLAGS="$JAVAC_FLAGS -g"
-    ;;
-  slowdebug )
-    CFLAGS="$CFLAGS $D_FLAG"
-    C_O_FLAG_HI="$C_O_FLAG_NONE"
-    C_O_FLAG_NORM="$C_O_FLAG_NONE"
-    CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
-    CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
-    JAVAC_FLAGS="$JAVAC_FLAGS -g"
-    ;;
-esac

AC_SUBST(CFLAGS_JDKLIB)
AC_SUBST(CFLAGS_JDKEXE)

View File

@@ -108,7 +108,11 @@ define add_native_source
	# setting -showIncludes, all included files are printed. These are filtered out and
	# parsed into make dependences.
	ifeq ($(COMPILER_TYPE),CL)
-		$$($1_$2_COMP) $$($1_$2_FLAGS) -showIncludes $$($1_$2_DEBUG_OUT_FLAGS) $(CC_OUT_OPTION)$$($1_$2_OBJ) $2 | $(TEE) $$($1_$2_DEP).raw | $(GREP) -v "^Note: including file:"
+		($$($1_$2_COMP) $$($1_$2_FLAGS) -showIncludes $$($1_$2_DEBUG_OUT_FLAGS) \
+			$(CC_OUT_OPTION)$$($1_$2_OBJ) $2 ; echo $$$$? > $$($1_$2_DEP).exitvalue) \
+			| $(TEE) $$($1_$2_DEP).raw | $(GREP) -v "^Note: including file:" \
+			&& exit `cat $$($1_$2_DEP).exitvalue`
+		$(RM) $$($1_$2_DEP).exitvalue
		($(ECHO) $$@: \\ \
		&& $(SED) -e '/^Note: including file:/!d' \
			-e 's|Note: including file: *||' \
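The subtle part of the new recipe is preserving the compiler's exit status: a POSIX pipeline reports the status of its last command, so piping the compiler through tee and grep would otherwise mask compile failures. The same trick in plain shell with hypothetical file names (in a make recipe each logical line runs in its own shell, so the exit re-raises the stored status):

    (cl -showIncludes -c foo.c ; echo $? > foo.exitvalue) \
      | tee foo.raw | grep -v '^Note: including file:' \
      && exit `cat foo.exitvalue`
    rm foo.exitvalue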

View File

@@ -0,0 +1,123 @@
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
##########################################################################################
#
# This Makefile, together with Tools.gmk, can be used to compile a set of
# gcc-based, portable, self-contained cross-compilation packages, capable
# of building OpenJDK.
#
# In addition to the makefiles, access to Oracle Linux installation
# media is required. This has been tested against Oracle Enterprise Linux
# 5.5. Set the variables RPM_DIR_x86_64 and RPM_DIR_i686 respectively to point
# to the directories containing the RPMs.
#
# By default this Makefile will build crosstools for:
# * i686-unknown-linux-gnu
# * x86_64-unknown-linux-gnu
# The x86_64 version of the compilers will work in multi arch mode and will
# be able to compile 32bit binaries with the -m32 flag. This makes the
# explicit cross compiler for i686 somewhat redundant and is a known issue.
#
# To build the full set of crosstools, use a command line looking like this:
#
# make tars RPM_DIR_x86_64=/tmp/oel55-x86_64/Server/ RPM_DIR_i686=/tmp/oel55-i686/Server/
#
# To create an x86_64 package without the redundant i686 cross compiler, do
# like this:
#
# make tars platforms=x86_64-unknown-linux-gnu RPM_DIR_x86_64=/tmp/oel55-x86_64/Server/ RPM_DIR_i686=/tmp/oel55-i686/Server/
#
# Main makefile which iterates over all host and target platforms.
#
os := $(shell uname -o)
cpu := x86_64
#$(shell uname -p)
#
# This wrapper script can handle exactly these platforms
#
platforms := $(foreach p,x86_64 i686,$(p)-unknown-linux-gnu)
#platforms := $(foreach p,x86_64,$(p)-unknown-linux-gnu)
# Figure out what platform this is building on.
me := $(cpu)-$(if $(findstring Linux,$(os)),unknown-linux-gnu)
$(info Building on platform $(me))
all compile : $(platforms)
ifeq (,$(SKIP_ME))
$(foreach p,$(filter-out $(me),$(platforms)),$(eval $(p) : $$(me)))
endif
OUTPUT_ROOT = $(abspath ../../../build/devkit)
RESULT = $(OUTPUT_ROOT)/result
submakevars = HOST=$@ BUILD=$(me) \
RESULT=$(RESULT) PREFIX=$(RESULT)/$@ \
OUTPUT_ROOT=$(OUTPUT_ROOT)
$(platforms) :
@echo 'Building compilers for $@'
@echo 'Targets: $(platforms)'
for p in $@ $(filter-out $@,$(platforms)); do \
$(MAKE) -f Tools.gmk all $(submakevars) \
TARGET=$$p || exit 1 ; \
done
@echo 'Building ccache program for $@'
$(MAKE) -f Tools.gmk ccache $(submakevars) TARGET=$@
@echo 'All done'
$(foreach a,i686 x86_64,$(eval $(a) : $(filter $(a)%,$(platforms))))
ia32 : i686
today := $(shell date +%Y%m%d)
define Mktar
$(1)_tar = $$(RESULT)/sdk-$(1)-$$(today).tar.gz
$$($(1)_tar) : PLATFORM = $(1)
TARFILES += $$($(1)_tar)
$$($(1)_tar) : $(1) $$(shell find $$(RESULT)/$(1))
endef
$(foreach p,$(platforms),$(eval $(call Mktar,$(p))))
tars : all $(TARFILES)
onlytars : $(TARFILES)
%.tar.gz :
@echo 'Creating compiler package $@'
cd $(RESULT)/$(PLATFORM) && tar -czf $@ *
touch $@
clean :
rm -rf build result
FORCE :
.PHONY : $(configs) $(platforms)
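Taken together with Tools.gmk below, a hypothetical end-to-end run looks like this (all paths and the install prefix are examples, not part of the commit):

    cd common/makefiles/devkit
    make tars platforms=x86_64-unknown-linux-gnu \
        RPM_DIR_x86_64=/tmp/oel55-x86_64/Server/ RPM_DIR_i686=/tmp/oel55-i686/Server/
    mkdir -p /opt/devkit
    tar -xzf ../../../build/devkit/result/sdk-x86_64-unknown-linux-gnu-*.tar.gz -C /opt/devkit
    # the unpacked root holds bin/ plus <triplet>/sys-root, the exact layout the
    # new --with-devkit probe at the top of this commit looks for
    bash configure --with-devkit=/opt/devkit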

View File

@@ -0,0 +1,473 @@
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
##########################################################################################
#
# Workhorse makefile for creating ONE cross compiler
# Needs either to be built BUILD -> BUILD, or to have
# a BUILD -> HOST compiler prebuilt
#
# NOTE: There is a bug here. We don't limit the
# PATH when building BUILD -> BUILD, which means that
# if you configure after you've once built the BUILD->BUILD
# compiler, THAT one will be picked up as the compiler for itself.
# This is not so great, especially if you did a partial delete
# of the target tree.
#
# Fix this...
#
$(info TARGET=$(TARGET))
$(info HOST=$(HOST))
$(info BUILD=$(BUILD))
ARCH := $(word 1,$(subst -, ,$(TARGET)))
##########################################################################################
# Define external dependencies
# Latest that could be made to work.
gcc_ver := gcc-4.7.3
binutils_ver := binutils-2.22
ccache_ver := ccache-3.1.9
mpfr_ver := mpfr-3.0.1
gmp_ver := gmp-4.3.2
mpc_ver := mpc-1.0.1
GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.bz2
BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.bz2
CCACHE := http://samba.org/ftp/ccache/$(ccache_ver).tar.gz
MPFR := http://www.mpfr.org/${mpfr_ver}/${mpfr_ver}.tar.bz2
GMP := http://ftp.gnu.org/pub/gnu/gmp/${gmp_ver}.tar.bz2
MPC := http://www.multiprecision.org/mpc/download/${mpc_ver}.tar.gz
# RPMs in OEL5.5
RPM_LIST := \
kernel-headers \
glibc-2 glibc-headers glibc-devel \
cups-libs cups-devel \
libX11 libX11-devel \
xorg-x11-proto-devel \
alsa-lib alsa-lib-devel \
libXext libXext-devel \
libXtst libXtst-devel \
libXrender libXrender-devel \
freetype freetype-devel \
libXt libXt-devel \
libSM libSM-devel \
libICE libICE-devel \
libXi libXi-devel \
libXdmcp libXdmcp-devel \
libXau libXau-devel \
libgcc
ifeq ($(ARCH),x86_64)
RPM_DIR ?= $(RPM_DIR_x86_64)
RPM_ARCHS := x86_64
ifeq ($(BUILD),$(HOST))
ifeq ($(TARGET),$(HOST))
# When building the native compiler for x86_64, enable mixed mode.
RPM_ARCHS += i386 i686
endif
endif
else
RPM_DIR ?= $(RPM_DIR_i686)
RPM_ARCHS := i386 i686
endif
# Sort to remove duplicates
RPM_FILE_LIST := $(sort $(foreach a,$(RPM_ARCHS),$(wildcard $(patsubst %,$(RPM_DIR)/%*$a.rpm,$(RPM_LIST)))))
ifeq ($(RPM_FILE_LIST),)
$(error Found no RPMs, RPM_DIR must point to list of directories to search for RPMs)
endif
##########################################################################################
# Define common directories and files
# Ensure we have 32-bit libs also for x64. We enable mixed-mode.
ifeq (x86_64,$(ARCH))
LIBDIRS := lib64 lib
CFLAGS_lib := -m32
else
LIBDIRS := lib
endif
# Define directories
RESULT := $(OUTPUT_ROOT)/result
BUILDDIR := $(OUTPUT_ROOT)/$(HOST)/$(TARGET)
PREFIX := $(RESULT)/$(HOST)
TARGETDIR := $(PREFIX)/$(TARGET)
SYSROOT := $(TARGETDIR)/sys-root
DOWNLOAD := $(OUTPUT_ROOT)/download
SRCDIR := $(OUTPUT_ROOT)/src
# Marker file for unpacking rpms
rpms := $(SYSROOT)/rpms_unpacked
# Need to patch libs that are linker scripts to use non-absolute paths
libs := $(SYSROOT)/libs_patched
##########################################################################################
# Unpack source packages
# Generate downloading + unpacking of sources.
define Download
$(1)_DIR = $(abspath $(SRCDIR)/$(basename $(basename $(notdir $($(1))))))
$(1)_CFG = $$($(1)_DIR)/configure
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)))
$$($(1)_CFG) : $$($(1)_FILE)
mkdir -p $$(SRCDIR)
tar -C $$(SRCDIR) -x$$(if $$(findstring .gz, $$<),z,j)f $$<
$$(foreach p,$$(abspath $$(wildcard $$(notdir $$($(1)_DIR)).patch)), \
echo PATCHING $$(p) ; \
patch -d $$($(1)_DIR) -p1 -i $$(p) ; \
)
touch $$@
$$($(1)_FILE) :
wget -P $(DOWNLOAD) $$($(1))
endef
# Download and unpack all source packages
$(foreach p,GCC BINUTILS CCACHE MPFR GMP MPC,$(eval $(call Download,$(p))))
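Each Download expansion pairs a wget rule with an unpack rule for one tarball. Roughly what the GCC instance executes, written as plain shell against the default OUTPUT_ROOT (paths illustrative):

    wget -P build/devkit/download http://ftp.gnu.org/pub/gnu/gcc/gcc-4.7.3/gcc-4.7.3.tar.bz2
    mkdir -p build/devkit/src
    # -z is chosen for .gz archives, -j for .bz2, per the $(findstring .gz,...) switch
    tar -C build/devkit/src -xjf build/devkit/download/gcc-4.7.3.tar.bz2
    # a gcc-4.7.3.patch sitting next to the makefiles would then be applied with patch -p1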
##########################################################################################
# Unpack RPMS
# Note: for building on Linux you should have rpm2cpio installed.
define unrpm
$(SYSROOT)/$(notdir $(1)).unpacked \
: $(1)
$$(rpms) : $(SYSROOT)/$(notdir $(1)).unpacked
endef
%.unpacked :
$(info Unpacking target rpms and libraries from $<)
@(mkdir -p $(@D); \
cd $(@D); \
rpm2cpio $< | \
cpio --extract --make-directories \
-f \
"./usr/share/doc/*" \
"./usr/share/man/*" \
"./usr/X11R6/man/*" \
"*/X11/locale/*" \
|| die ; )
touch $@
$(foreach p,$(RPM_FILE_LIST),$(eval $(call unrpm,$(p))))
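Per RPM, the %.unpacked recipe boils down to an rpm2cpio pipeline like the following (RPM filename hypothetical; the -f patterns exclude docs, man pages and X11 locales from the sysroot):

    mkdir -p "$SYSROOT" && cd "$SYSROOT"
    rpm2cpio /tmp/oel55-x86_64/Server/libX11-devel-1.0.3-11.el5.x86_64.rpm \
      | cpio --extract --make-directories \
            -f "./usr/share/doc/*" "./usr/share/man/*" \
               "./usr/X11R6/man/*" "*/X11/locale/*"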
##########################################################################################
# Note: MUST create a <sys-root>/usr/lib even if not really needed.
# gcc will use a path relative to it to resolve lib64. (x86_64).
# we're creating a multi-lib compiler with 32-bit libc as well, so we should
# have it anyway, but just to make sure...
# Patch libc.so and libpthread.so to force linking against libraries in sysroot
# and not the ones installed on the build machine.
$(libs) : $(rpms)
@echo Patching libc and pthreads
@(for f in `find $(SYSROOT) -name libc.so -o -name libpthread.so`; do \
(cat $$f | sed -e 's|/usr/lib64/||g' \
-e 's|/usr/lib/||g' \
-e 's|/lib64/||g' \
-e 's|/lib/||g' ) > $$f.tmp ; \
mv $$f.tmp $$f ; \
done)
@mkdir -p $(SYSROOT)/usr/lib
@touch $@
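On glibc, libc.so and libpthread.so are linker scripts rather than ELF objects, and the stock ones reference their members by absolute path, which would escape the sysroot. A sketch of the rewrite on one hypothetical OEL 5.5-style script:

    # before: GROUP ( /lib64/libc.so.6 /usr/lib64/libc_nonshared.a  AS_NEEDED ( /lib64/ld-linux-x86-64.so.2 ) )
    sed -e 's|/usr/lib64/||g' -e 's|/usr/lib/||g' -e 's|/lib64/||g' -e 's|/lib/||g' \
        "$SYSROOT/usr/lib64/libc.so" > libc.so.tmp && mv libc.so.tmp "$SYSROOT/usr/lib64/libc.so"
    # after:  GROUP ( libc.so.6 libc_nonshared.a  AS_NEEDED ( ld-linux-x86-64.so.2 ) )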
##########################################################################################
# Define marker files for each source package to be compiled
$(foreach t,binutils mpfr gmp mpc gcc ccache,$(eval $(t) = $(TARGETDIR)/$($(t)_ver).done))
##########################################################################################
# Default base config
CONFIG = --target=$(TARGET) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX)
PATHEXT = $(RESULT)/$(BUILD)/bin:
PATHPRE = PATH=$(PATHEXT)$(PATH)
BUILDPAR = -j16
# Default commands to when making
MAKECMD =
INSTALLCMD = install
declare_tools = CC$(1)=$(2)gcc LD$(1)=$(2)ld AR$(1)=$(2)ar AS$(1)=$(2)as RANLIB$(1)=$(2)ranlib CXX$(1)=$(2)g++ OBJDUMP$(1)=$(2)objdump
ifeq ($(HOST),$(BUILD))
ifeq ($(HOST),$(TARGET))
TOOLS = $(call declare_tools,_FOR_TARGET,)
endif
endif
TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
##########################################################################################
# Create a TARGET bfd + libiberty only.
# Configure one or two times depending on multilib arch.
# If multilib, the second should be 32-bit, and we resolve
# CFLAG_<name> to most likely -m32.
define mk_bfd
$$(info Libs for $(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: CFLAGS += $$(CFLAGS_$(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: LIBDIRS = --libdir=$(TARGETDIR)/$(1)
bfdlib += $$(TARGETDIR)/$$(binutils_ver)-$(subst /,-,$(1)).done
bfdmakes += $$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile
endef
# Create one set of bfds etc for each multilib arch
$(foreach l,$(LIBDIRS),$(eval $(call mk_bfd,$(l))))
# Only build these two libs.
$(bfdlib) : MAKECMD = all-libiberty all-bfd
$(bfdlib) : INSTALLCMD = install-libiberty install-bfd
# Building targets libbfd + libiberty. HOST==TARGET, i.e. not
# for a cross env.
$(bfdmakes) : CONFIG = --target=$(TARGET) \
--host=$(TARGET) --build=$(BUILD) \
--prefix=$(TARGETDIR) \
--with-sysroot=$(SYSROOT) \
$(LIBDIRS)
$(bfdmakes) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
##########################################################################################
$(gcc) \
$(binutils) \
$(gmp) \
$(mpfr) \
$(mpc) \
$(bfdmakes) \
$(ccache) : ENVS += $(TOOLS)
# libdir to work around hateful bfd stuff installing into wrong dirs...
# ensure we have 64 bit bfd support in the HOST library. I.e our
# compiler on i686 will know 64 bit symbols, BUT later
# we build just the libs again for TARGET, then with whatever the arch
# wants.
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
# Makefile creation. Simply run configure in build dir.
$(bfdmakes) \
$(BUILDDIR)/$(binutils_ver)/Makefile \
: $(BINUTILS_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(BINUTILS_CFG) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-multilib \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpfr_ver)/Makefile \
: $(MPFR_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPFR_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(gmp_ver)/Makefile \
: $(GMP_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(GMP_CFG) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpc_ver)/Makefile \
: $(MPC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPC_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
# Only valid if glibc target -> linux
# proper destructor handling for c++
ifneq (,$(findstring linux,$(TARGET)))
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
endif
# Want:
# c,c++
# shared libs
# multilib (-m32/-m64 on x64)
# skip native language.
# and link and assemble with the binutils we created
# earlier, so --with-gnu*
$(BUILDDIR)/$(gcc_ver)/Makefile \
: $(GCC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(GCC_CFG) $(EXTRA_CFLAGS) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--enable-languages=c,c++ \
--enable-shared \
--enable-multilib \
--disable-nls \
--with-gnu-as \
--with-gnu-ld \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
--with-mpc=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
# need binutils for gcc
$(gcc) : $(binutils)
# as of 4.3 or so need these for doing config
$(BUILDDIR)/$(gcc_ver)/Makefile : $(gmp) $(mpfr) $(mpc)
$(mpfr) : $(gmp)
$(mpc) : $(gmp) $(mpfr)
##########################################################################################
# very straightforward. just build a ccache. it is only for host.
$(BUILDDIR)/$(ccache_ver)/Makefile \
: $(CCACHE_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(CCACHE_CFG) \
$(CONFIG) \
) > $(@D)/log.config 2>&1
@echo 'done'
gccpatch = $(TARGETDIR)/gcc-patched
##########################################################################################
# For some reason cpp is not created as a target-compiler
ifeq ($(HOST),$(TARGET))
$(gccpatch) : $(gcc) link_libs
@echo -n 'Creating compiler symlinks...'
@for f in cpp; do \
if [ ! -e $(PREFIX)/bin/$(TARGET)-$$f ];\
then \
cd $(PREFIX)/bin && \
ln -s $$f $(TARGET)-$$f ; \
fi \
done
@touch $@
@echo 'done'
##########################################################################################
# Ugly at best. It seems that when we compile a host->host compiler that is NOT
# the BUILD compiler, the result will not search for libs in the package root.
# "Solve" this by creating links from the target libdirs to where the libs are.
link_libs:
@echo -n 'Creating library symlinks...'
@$(foreach l,$(LIBDIRS), \
for f in `cd $(PREFIX)/$(l) && ls`; do \
if [ ! -e $(TARGETDIR)/$(l)/$$f ]; then \
mkdir -p $(TARGETDIR)/$(l) && \
cd $(TARGETDIR)/$(l)/ && \
ln -s $(if $(findstring /,$(l)),../,)../../$(l)/$$f $$f; \
fi \
done;)
@echo 'done'
else
$(gccpatch) :
@echo 'done'
endif
##########################################################################################
# Build in two steps.
# make <default>
# make install.
# Use path to our build hosts cross tools
# Always need to build cross tools for build host self.
$(TARGETDIR)/%.done : $(BUILDDIR)/%/Makefile
$(info Building $(basename $@). Log in $(<D)/log.build)
$(PATHPRE) $(ENVS) $(MAKE) $(BUILDPAR) -f $< -C $(<D) $(MAKECMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.build 2>&1
@echo -n 'installing...'
$(PATHPRE) $(MAKE) $(INSTALLPAR) -f $< -C $(<D) $(INSTALLCMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.install 2>&1
@touch $@
@echo 'done'
##########################################################################################
bfdlib : $(bfdlib)
binutils : $(binutils)
rpms : $(rpms)
libs : $(libs)
sysroot : rpms libs
gcc : sysroot $(gcc) $(gccpatch)
all : binutils gcc bfdlib
# this is only built for host. so separate.
ccache : $(ccache)
.PHONY : gcc all binutils bfdlib link_libs rpms libs sysroot

View File

@@ -215,3 +215,4 @@ c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
 8f7ffb296385f85a4a6d53f9f2d4a7b13a8fa1ff jdk8-b91
 717aa26f8e0a1c0e768aebb3a763aca56db0c83e jdk8-b92
 8dc9d7ccbb2d77fd89bc321bb02e67c152aca257 jdk8-b93
+22f5d7f261d9d61a953d2d9a53f2e9ce0ca361d1 jdk8-b94

View File

@@ -349,3 +349,5 @@ b19517cecc2e91636d7c16ba2f35e3d3dc628099 hs25-b33
 573d86d412cd9d3df7912194c1a540be50e9544e jdk8-b93
 b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
 3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
+1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
+69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37

View File

@@ -486,7 +486,7 @@ $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/services/%
JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
# export jfr.h
ifeq ($JFR_EXISTS,1)
-$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
+$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/%
	$(install-file)
else
$(EXPORT_INCLUDE_DIR)/jfr.h:

View File

@@ -47,6 +47,7 @@
# flags.make	- with macro settings
# vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
# adlc.make	-
+# trace.make	- generate tracing event and type definitions
# jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
# sa.make	- generate SA jar file and natives
#
@@ -119,6 +120,7 @@ SIMPLE_DIRS = \
	$(PLATFORM_DIR)/generated/dependencies \
	$(PLATFORM_DIR)/generated/adfiles \
	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles \
	$(PLATFORM_DIR)/generated/dtracefiles

TARGETS = debug fastdebug optimized product
@@ -128,7 +130,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make

# dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make dtrace.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make

BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -331,6 +333,16 @@ jvmti.make: $(BUILDTREE_MAKE)
	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
	) > $@

+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
sa.make: $(BUILDTREE_MAKE)
	@echo Creating $@ ...
	$(QUIETLY) ( \

View File

@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#

TYPE=MINIMAL1

@@ -32,6 +32,7 @@ INCLUDE_SERVICES ?= false
INCLUDE_MANAGEMENT ?= false
INCLUDE_ALL_GCS ?= false
INCLUDE_NMT ?= false
+INCLUDE_TRACE ?= false
INCLUDE_CDS ?= false

CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -80,7 +80,7 @@ default: vm_build_preliminaries the_vm
	@echo All done.

# This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff dtrace_stuff
+vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
	@# We need a null action here, so implicit rules don't get consulted.

$(Cached_plat): $(Plat_File)

@@ -94,6 +94,10 @@ ad_stuff: $(Cached_plat) $(adjust-mflags)
jvmti_stuff: $(Cached_plat) $(adjust-mflags)
	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)

+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
ifeq ($(OS_VENDOR), Darwin)
# generate dtrace header files
dtrace_stuff: $(Cached_plat) $(adjust-mflags)

View File

@@ -0,0 +1,121 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (trace.make) is included from the trace.make in the
# build directories.
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/bsd/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
JvmtiOutDir = $(GENERATED)/jvmtifiles
TraceOutDir = $(GENERATED)/tracefiles
TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
ifneq ($(INCLUDE_TRACE), false)
TraceGeneratedNames += traceProducer.cpp
endif
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
endif
.PHONY: all clean cleanall
# #########################################################################
all: $(TraceGeneratedFiles)
GENERATE_CODE= \
$(QUIETLY) echo Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
test -f $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)
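Each GENERATE_CODE expansion above is a single XSLT run through the jvmtiGen tool that the jvmti build step already compiled into the jvmtifiles directory. Spelled out for traceTypes.hpp with hypothetical build-tree paths:

    java -classpath ../generated/jvmtifiles jvmtiGen \
        -IN  hotspot/src/share/vm/trace/trace.xml \
        -XSL hotspot/src/share/vm/trace/traceTypes.xsl \
        -OUT ../generated/tracefiles/traceTypes.hpp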

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Rules to build JVM and related libraries, included from vm.make in the build

@@ -52,7 +52,7 @@ endif
# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
# The adfiles directory contains ad_<arch>.[ch]pp.
# The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
VPATH += $(Src_Dirs_V:%=%:)

# set INCLUDES for C preprocessor.

@@ -66,7 +66,7 @@ else
  SYMFLAG =
endif

# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
# in $(GAMMADIR)/make/defs.make
ifeq ($(HOTSPOT_BUILD_VERSION),)
  BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""

@@ -93,7 +93,7 @@ CXXFLAGS = \
# This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date.
CXXFLAGS/vm_version.o += ${JRE_VERSION}

CXXFLAGS/BYFILE = $(CXXFLAGS/$@)

@@ -105,10 +105,6 @@ ifdef DEFAULT_LIBPATH
CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
endif

-ifndef JAVASE_EMBEDDED
-CFLAGS += -DINCLUDE_TRACE
-endif

# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN/BYFILE)

@@ -165,15 +161,15 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm

-ifndef JAVASE_EMBEDDED
-SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+
+ifneq ($(INCLUDE_TRACE), false)
+CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
  fi)
endif

-CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles

COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# The common definitions for hotspot builds.

@@ -236,7 +236,7 @@ ifneq ($(ALT_JDK_IMAGE_DIR),)
  JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
endif

# The platform dependent defs.make defines platform specific variable such
# as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make

@@ -258,7 +258,7 @@ ifneq ($(OSNAME),windows)
  # LIBARCH - directory name in JDK/JRE
  # Use uname output for SRCARCH, but deal with platform differences. If ARCH
  # is not explicitly listed below, it is treated as x86.
  SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
  ARCH/ = x86
  ARCH/sparc = sparc

@@ -337,8 +337,5 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h

-ifndef JAVASE_EMBEDDED
-EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
-endif

.PHONY: $(HS_ALT_MAKE)/defs.make

View File

@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#

ifeq ($(INCLUDE_JVMTI), false)
  CXXFLAGS += -DINCLUDE_JVMTI=0

@@ -100,7 +100,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
endif

ifeq ($(INCLUDE_NMT), false)
  CXXFLAGS += -DINCLUDE_NMT=0

@@ -110,3 +110,5 @@ ifeq ($(INCLUDE_NMT), false)
	memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
	memTracker.cpp nmtDCmd.cpp
endif
+
+-include $(HS_ALT_MAKE)/excludeSrc.make

View File

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=36
+HS_BUILD_NUMBER=37

JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@@ -47,6 +47,7 @@
# flags.make	- with macro settings
# vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
# adlc.make	-
+# trace.make	- generate tracing event and type definitions
# jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
# sa.make	- generate SA jar file and natives
#

@@ -114,7 +115,8 @@ COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE))
SIMPLE_DIRS = \
	$(PLATFORM_DIR)/generated/dependencies \
	$(PLATFORM_DIR)/generated/adfiles \
-	$(PLATFORM_DIR)/generated/jvmtifiles
+	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles

TARGETS = debug fastdebug optimized product
SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))

@@ -122,7 +124,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
# For dependencies and recursive makes.
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make

-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make

BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)

@@ -269,6 +271,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
	echo && \
	echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
	echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
+	[ -n "$(INCLUDE_TRACE)" ] && \
+	echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
	echo; \
	[ -n "$(SPEC)" ] && \
	echo "include $(SPEC)"; \

@@ -337,6 +341,16 @@ jvmti.make: $(BUILDTREE_MAKE)
	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
	) > $@

+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
sa.make: $(BUILDTREE_MAKE)
	@echo Creating $@ ...
	$(QUIETLY) ( \

View File

@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#

TYPE=MINIMAL1

@@ -32,6 +32,7 @@ INCLUDE_SERVICES ?= false
INCLUDE_MANAGEMENT ?= false
INCLUDE_ALL_GCS ?= false
INCLUDE_NMT ?= false
+INCLUDE_TRACE ?= false
INCLUDE_CDS ?= false

CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -80,7 +80,7 @@ default: vm_build_preliminaries the_vm
	@echo All done.

# This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
+vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
	@# We need a null action here, so implicit rules don't get consulted.

$(Cached_plat): $(Plat_File)

@@ -94,6 +94,10 @@ ad_stuff: $(Cached_plat) $(adjust-mflags)
jvmti_stuff: $(Cached_plat) $(adjust-mflags)
	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)

+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
# generate SA jar files and native header
sa_stuff:
	@$(MAKE) -f sa.make $(MFLAGS-adjusted)

View File

@@ -0,0 +1,120 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (trace.make) is included from the trace.make in the
# build directories.
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/linux/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
JvmtiOutDir = $(GENERATED)/jvmtifiles
TraceOutDir = $(GENERATED)/tracefiles
TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
ifneq ($(INCLUDE_TRACE), false)
TraceGeneratedNames += traceProducer.cpp
endif
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
endif
.PHONY: all clean cleanall
# #########################################################################
all: $(TraceGeneratedFiles)
GENERATE_CODE= \
$(QUIETLY) echo Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
test -f $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)
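The headers generated here land in $(GENERATED)/tracefiles and are picked up by the VM sources (vm.make adds the directory to Src_Dirs_V further down); trace_stuff depends on jvmti_stuff because the XSLT driver, jvmtiGen, is compiled into $(JvmtiOutDir). For orientation, a minimal sketch of the kind of code that consumes the generated event classes — the event name, field setter, and header path are hypothetical stand-ins, not taken from trace.xml:

    // Illustrative sketch only; EventExampleAllocation and set_size() are
    // hypothetical stand-ins for whatever trace.xml actually declares.
    #include <cstddef>
    #include "tracefiles/traceEventClasses.hpp"  // generated by trace.make

    void report_allocation(size_t bytes) {
      EventExampleAllocation event;    // hypothetical generated event class
      if (event.should_commit()) {     // cheap check: is the event enabled?
        event.set_size(bytes);         // hypothetical generated field setter
        event.commit();                // hand the event to the trace backend
      }
    }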
View File
@ -1,5 +1,5 @@
# #
# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any # or visit www.oracle.com if you need additional information or have any
# questions. # questions.
# #
# #
# Rules to build JVM and related libraries, included from vm.make in the build # Rules to build JVM and related libraries, included from vm.make in the build
@ -52,7 +52,7 @@ endif
# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
# The adfiles directory contains ad_<arch>.[ch]pp. # The adfiles directory contains ad_<arch>.[ch]pp.
# The jvmtifiles directory contains jvmti*.[ch]pp # The jvmtifiles directory contains jvmti*.[ch]pp
Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
VPATH += $(Src_Dirs_V:%=%:) VPATH += $(Src_Dirs_V:%=%:)
# set INCLUDES for C preprocessor. # set INCLUDES for C preprocessor.
@ -72,7 +72,7 @@ else
endif endif
endif endif
# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
# in $(GAMMADIR)/make/defs.make # in $(GAMMADIR)/make/defs.make
ifeq ($(HOTSPOT_BUILD_VERSION),) ifeq ($(HOTSPOT_BUILD_VERSION),)
BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\"" BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
@ -99,7 +99,7 @@ CXXFLAGS = \
# This is VERY important! The version define must only be supplied to vm_version.o # This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain # If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date. # a time and date.
CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/vm_version.o += ${JRE_VERSION}
CXXFLAGS/BYFILE = $(CXXFLAGS/$@) CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
@ -108,12 +108,6 @@ CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
CXXFLAGS += $(CXXFLAGS/BYFILE) CXXFLAGS += $(CXXFLAGS/BYFILE)
ifndef JAVASE_EMBEDDED
ifneq (${ARCH},arm)
CFLAGS += -DINCLUDE_TRACE
endif
endif
# CFLAGS_WARN holds compiler options to suppress/enable warnings. # CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN/BYFILE) CFLAGS += $(CFLAGS_WARN/BYFILE)
@ -158,16 +152,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
ifndef JAVASE_EMBEDDED CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
ifneq (${ARCH},arm) CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
ifneq ($(INCLUDE_TRACE), false)
CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
find $(HS_ALT_SRC)/share/vm/jfr -type d; \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi) fi)
endif endif
endif
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles
COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
@ -316,7 +308,7 @@ endif
# With more recent Redhat releases (or the cutting edge version Fedora), if # With more recent Redhat releases (or the cutting edge version Fedora), if
# SELinux is configured to be enabled, the runtime linker will fail to apply # SELinux is configured to be enabled, the runtime linker will fail to apply
# the text relocation to libjvm.so considering that it is built as a non-PIC # the text relocation to libjvm.so considering that it is built as a non-PIC
# DSO. To workaround that, we run chcon to libjvm.so after it is built. See # DSO. To workaround that, we run chcon to libjvm.so after it is built. See
# details in bug 6538311. # details in bug 6538311.
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT) $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(QUIETLY) { \ $(QUIETLY) { \
View File
@ -47,6 +47,7 @@
# flags.make - with macro settings # flags.make - with macro settings
# vm.make - to support making "$(MAKE) -v vm.make" in makefiles # vm.make - to support making "$(MAKE) -v vm.make" in makefiles
# adlc.make - # adlc.make -
# trace.make - generate tracing event and type definitions
# jvmti.make - generate JVMTI bindings from the spec (JSR-163) # jvmti.make - generate JVMTI bindings from the spec (JSR-163)
# sa.make - generate SA jar file and natives # sa.make - generate SA jar file and natives
# #
@ -107,7 +108,8 @@ COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE))
SIMPLE_DIRS = \ SIMPLE_DIRS = \
$(PLATFORM_DIR)/generated/dependencies \ $(PLATFORM_DIR)/generated/dependencies \
$(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/adfiles \
$(PLATFORM_DIR)/generated/jvmtifiles $(PLATFORM_DIR)/generated/jvmtifiles \
$(PLATFORM_DIR)/generated/tracefiles
TARGETS = debug fastdebug optimized product TARGETS = debug fastdebug optimized product
SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
@ -115,7 +117,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
# For dependencies and recursive makes. # For dependencies and recursive makes.
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@ -327,6 +329,16 @@ jvmti.make: $(BUILDTREE_MAKE)
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@ ) > $@
trace.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
sa.make: $(BUILDTREE_MAKE) sa.make: $(BUILDTREE_MAKE)
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
View File
@ -1,5 +1,5 @@
# #
# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -73,7 +73,7 @@ default: vm_build_preliminaries the_vm
@echo All done. @echo All done.
# This is an explicit dependency for the sake of parallel makes. # This is an explicit dependency for the sake of parallel makes.
vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
@# We need a null action here, so implicit rules don't get consulted. @# We need a null action here, so implicit rules don't get consulted.
$(Cached_plat): $(Plat_File) $(Cached_plat): $(Plat_File)
@ -87,6 +87,10 @@ ad_stuff: $(Cached_plat) $(adjust-mflags)
jvmti_stuff: $(Cached_plat) $(adjust-mflags) jvmti_stuff: $(Cached_plat) $(adjust-mflags)
@$(MAKE) -f jvmti.make $(MFLAGS-adjusted) @$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
# generate trace files
trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
@$(MAKE) -f trace.make $(MFLAGS-adjusted)
# generate SA jar files and native header # generate SA jar files and native header
sa_stuff: sa_stuff:
@$(MAKE) -f sa.make $(MFLAGS-adjusted) @$(MAKE) -f sa.make $(MFLAGS-adjusted)
@ -127,5 +131,5 @@ realclean:
rm -fr $(GENERATED) rm -fr $(GENERATED)
.PHONY: default vm_build_preliminaries .PHONY: default vm_build_preliminaries
.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean .PHONY: lists ad_stuff jvmti_stuff trace_stuff sa_stuff the_vm clean realclean
.PHONY: checks check_os_version install .PHONY: checks check_os_version install
View File
@ -0,0 +1,116 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (trace.make) is included from the trace.make in the
# build directories.
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/solaris/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
JvmtiOutDir = $(GENERATED)/jvmtifiles
TraceOutDir = $(GENERATED)/tracefiles
TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp \
traceProducer.cpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
endif
.PHONY: all clean cleanall
# #########################################################################
all: $(TraceGeneratedFiles)
GENERATE_CODE= \
$(QUIETLY) echo Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
test -f $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)
View File
@ -1,5 +1,5 @@
# #
# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any # or visit www.oracle.com if you need additional information or have any
# questions. # questions.
# #
# #
# Rules to build JVM and related libraries, included from vm.make in the build # Rules to build JVM and related libraries, included from vm.make in the build
@ -48,7 +48,7 @@ include $(MAKEFILES_DIR)/$(BUILDARCH).make
# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
# The adfiles directory contains ad_<arch>.[ch]pp. # The adfiles directory contains ad_<arch>.[ch]pp.
# The jvmtifiles directory contains jvmti*.[ch]pp # The jvmtifiles directory contains jvmti*.[ch]pp
Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
VPATH += $(Src_Dirs_V:%=%:) VPATH += $(Src_Dirs_V:%=%:)
# set INCLUDES for C preprocessor # set INCLUDES for C preprocessor
@ -87,7 +87,7 @@ CXXFLAGS = \
# This is VERY important! The version define must only be supplied to vm_version.o # This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain # If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date. # a time and date.
CXXFLAGS/vm_version.o += ${JRE_VERSION} CXXFLAGS/vm_version.o += ${JRE_VERSION}
CXXFLAGS/BYFILE = $(CXXFLAGS/$@) CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
@ -103,7 +103,7 @@ CFLAGS += $(CFLAGS_WARN)
CFLAGS += $(CFLAGS/NOEX) CFLAGS += $(CFLAGS/NOEX)
# Extra flags from gnumake's invocation or environment # Extra flags from gnumake's invocation or environment
CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE CFLAGS += $(EXTRA_CFLAGS)
# Math Library (libm.so), do not use -lm. # Math Library (libm.so), do not use -lm.
# There might be two versions of libm.so on the build system: # There might be two versions of libm.so on the build system:
@ -137,9 +137,7 @@ else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
endif # sparcWorks endif # sparcWorks
ifeq ("${Platform_arch}", "sparc")
LIBS += -lkstat LIBS += -lkstat
endif
# By default, link the *.o into the library, not the executable. # By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM LINK_INTO$(LINK_INTO) = LIBJVM
@ -177,12 +175,14 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \ CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
ifneq ($(INCLUDE_TRACE), false)
CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
find $(HS_ALT_SRC)/share/vm/jfr -type d; \ find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi) fi)
endif
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles
COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
@ -287,7 +287,7 @@ else
LINK_VM = $(LINK_LIB.CXX) LINK_VM = $(LINK_LIB.CXX)
endif endif
# making the library: # making the library:
$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),) ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
@echo Linking vm... @echo Linking vm...
$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK) $(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
View File
@ -196,6 +196,12 @@ HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)
# End VERSIONINFO parameters # End VERSIONINFO parameters
# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
!ifndef OPENJDK
!if !exists($(WorkSpace)\src\closed)
OPENJDK=true
!endif
!endif
# We don't support SA on ia64, and we can't # We don't support SA on ia64, and we can't
# build it if we are using a version of Vis Studio # build it if we are using a version of Vis Studio
@ -273,6 +279,7 @@ $(variantDir)\local.make: checks
@ echo HS_COMPANY=$(COMPANY_NAME) >> $@ @ echo HS_COMPANY=$(COMPANY_NAME) >> $@
@ echo HS_FILEDESC=$(HS_FILEDESC) >> $@ @ echo HS_FILEDESC=$(HS_FILEDESC) >> $@
@ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@ @ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) >> $@
@ if "$(OPENJDK)" NEQ "" echo OPENJDK=$(OPENJDK) >> $@
@ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@ @ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT) >> $@
@ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@ @ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION) >> $@
@ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@ @ echo HS_BUILD_VER=$(HS_BUILD_VER) >> $@
View File
@ -71,13 +71,11 @@ for sd in \
BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}" BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}"
done done
BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles" BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
if [ -d "${ALTSRC}/share/vm/jfr" ]; then if [ -d "${ALTSRC}/share/vm/jfr" ]; then
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent" BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util" BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
fi fi
BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods" BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
View File
@ -1,5 +1,5 @@
# #
# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -30,15 +30,19 @@
JvmtiOutDir=jvmtifiles JvmtiOutDir=jvmtifiles
!include $(WorkSpace)/make/windows/makefiles/jvmti.make !include $(WorkSpace)/make/windows/makefiles/jvmti.make
# Pick up rules for building trace
TraceOutDir=tracefiles
!include $(WorkSpace)/make/windows/makefiles/trace.make
# Pick up rules for building SA # Pick up rules for building SA
!include $(WorkSpace)/make/windows/makefiles/sa.make !include $(WorkSpace)/make/windows/makefiles/sa.make
AdlcOutDir=adfiles AdlcOutDir=adfiles
!if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) buildobjfiles default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
!else !else
default:: $(JvmtiGeneratedFiles) buildobjfiles default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
!endif !endif
buildobjfiles: buildobjfiles:
View File
@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any # or visit www.oracle.com if you need additional information or have any
# questions. # questions.
# #
# #
!include $(WorkSpace)/make/windows/makefiles/rules.make !include $(WorkSpace)/make/windows/makefiles/rules.make
@ -72,7 +72,7 @@ ProjectCreatorIncludesPRIVATE=\
-ignorePath ppc \ -ignorePath ppc \
-ignorePath zero \ -ignorePath zero \
-hidePath .hg -hidePath .hg
# This is referenced externally by both the IDE and batch builds # This is referenced externally by both the IDE and batch builds
ProjectCreatorOptions= ProjectCreatorOptions=
@ -89,7 +89,7 @@ ProjectCreatorIDEOptions = \
-disablePch bytecodeInterpreter.cpp \ -disablePch bytecodeInterpreter.cpp \
-disablePch bytecodeInterpreterWithChecks.cpp \ -disablePch bytecodeInterpreterWithChecks.cpp \
-disablePch getThread_windows_$(Platform_arch).cpp \ -disablePch getThread_windows_$(Platform_arch).cpp \
-disablePch_compiler2 opcodes.cpp -disablePch_compiler2 opcodes.cpp
# Common options for the IDE builds for core, c1, and c2 # Common options for the IDE builds for core, c1, and c2
ProjectCreatorIDEOptions=\ ProjectCreatorIDEOptions=\
@ -115,7 +115,7 @@ ProjectCreatorIDEOptions=\
-define TARGET_OS_ARCH_windows_x86 \ -define TARGET_OS_ARCH_windows_x86 \
-define TARGET_OS_FAMILY_windows \ -define TARGET_OS_FAMILY_windows \
-define TARGET_COMPILER_visCPP \ -define TARGET_COMPILER_visCPP \
-define INCLUDE_TRACE \ -define INCLUDE_TRACE=1 \
$(ProjectCreatorIncludesPRIVATE) $(ProjectCreatorIncludesPRIVATE)
# Add in build-specific options # Add in build-specific options
@ -203,4 +203,12 @@ ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
-additionalFile jvmtiEnter.cpp \ -additionalFile jvmtiEnter.cpp \
-additionalFile jvmtiEnterTrace.cpp \ -additionalFile jvmtiEnterTrace.cpp \
-additionalFile jvmti.h \ -additionalFile jvmti.h \
-additionalFile bytecodeInterpreterWithChecks.cpp -additionalFile bytecodeInterpreterWithChecks.cpp \
-additionalFile traceEventClasses.hpp \
-additionalFile traceEventIds.hpp \
!if "$(OPENJDK)" != "true"
-additionalFile traceRequestables.hpp \
-additionalFile traceEventControl.hpp \
-additionalFile traceProducer.cpp \
!endif
-additionalFile traceTypes.hpp
View File
@ -0,0 +1,121 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (trace.make) is included from the trace.make in the
# build directories.
#
# It knows how to build and run the tools to generate trace files.
!include $(WorkSpace)/make/windows/makefiles/rules.make
# #########################################################################
TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
TraceSrcDir = $(WorkSpace)/src/share/vm/trace
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
!if "$(OPENJDK)" != "true"
TraceGeneratedNames = $(TraceGeneratedNames) \
traceRequestables.hpp \
traceEventControl.hpp \
traceProducer.cpp
!endif
#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand.
#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)"
TraceGeneratedFiles = \
$(TraceOutDir)/traceEventClasses.hpp \
$(TraceOutDir)/traceEventIds.hpp \
$(TraceOutDir)/traceTypes.hpp
!if "$(OPENJDK)" != "true"
TraceGeneratedFiles = $(TraceGeneratedFiles) \
$(TraceOutDir)/traceRequestables.hpp \
$(TraceOutDir)/traceEventControl.hpp \
$(TraceOutDir)/traceProducer.cpp
!endif
XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
!if "$(OPENJDK)" != "true"
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
!endif
.PHONY: all clean cleanall
# #########################################################################
default::
@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
!if "$(OPENJDK)" == "true"
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
!else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
!endif
# #########################################################################
cleanall :
rm $(TraceGeneratedFiles)
View File
@ -66,10 +66,6 @@ CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
!ifndef JAVASE_EMBEDDED
CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE"
!endif
CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS) CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS)
# Define that so jni.h is on correct side # Define that so jni.h is on correct side
@ -144,6 +140,7 @@ CXX_USE_PCH=$(CXX_DONT_USE_PCH)
VM_PATH=../generated VM_PATH=../generated
VM_PATH=$(VM_PATH);../generated/adfiles VM_PATH=$(VM_PATH);../generated/adfiles
VM_PATH=$(VM_PATH);../generated/jvmtifiles VM_PATH=$(VM_PATH);../generated/jvmtifiles
VM_PATH=$(VM_PATH);../generated/tracefiles
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
@ -172,10 +169,8 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
!if exists($(ALTSRC)\share\vm\jfr) !if exists($(ALTSRC)\share\vm\jfr)
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
!endif !endif
VM_PATH={$(VM_PATH)} VM_PATH={$(VM_PATH)}
@ -384,16 +379,13 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{..\generated\jvmtifiles}.cpp.obj:: {..\generated\jvmtifiles}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{..\generated\tracefiles}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj:: {$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj:: {$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
default:: default::
View File
@ -1,5 +1,5 @@
# #
# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,12 @@ BootStrapDir=$(HOTSPOTJDKDIST)
!endif !endif
!endif !endif
# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
!ifndef OPENJDK
!if !exists($(WorkSpace)\src\closed)
OPENJDK=true
!endif
!endif
!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make
@ -54,6 +60,10 @@ BootStrapDir=$(HOTSPOTJDKDIST)
JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make
# Pick up rules for building trace
TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles
!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make
!if "$(Variant)" == "compiler2" !if "$(Variant)" == "compiler2"
# Pick up rules for building adlc # Pick up rules for building adlc
!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make
@ -66,7 +76,7 @@ JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
HS_INTERNAL_NAME=jvm HS_INTERNAL_NAME=jvm
default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) $(TraceGeneratedFiles)
!include $(HOTSPOTWORKSPACE)/make/hotspot_version !include $(HOTSPOTWORKSPACE)/make/hotspot_version
View File
@ -252,6 +252,16 @@ bool frame::safe_for_sender(JavaThread *thread) {
return false; return false;
} }
// Could be a zombie method
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
return false;
}
// Could be a zombie method
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
return false;
}
// It should be safe to construct the sender though it might not be valid // It should be safe to construct the sender though it might not be valid
frame sender(_SENDER_SP, younger_sp, adjusted_stack); frame sender(_SENDER_SP, younger_sp, adjusted_stack);
@ -294,10 +304,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return jcw_safe; return jcw_safe;
} }
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
// because you must allocate window space // because you must allocate window space
if (sender_blob->frame_size() == 0) { if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least"); assert(!sender_blob->is_nmethod(), "should count return address at least");
return false; return false;
} }
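The sparc hunk above relaxes the frame-size sanity check from == 0 to <= 0: the value comes from a CodeBlob located via an unsafe lookup, so a negative size is just as disqualifying as zero. A standalone sketch of the predicate, assuming nothing about HotSpot types:

    // Standalone sketch, not HotSpot code: every real compiled frame is at
    // least one slot deep (the return address counts against the callee),
    // so zero *or* negative sizes mark the candidate sender as unwalkable.
    #include <cstdio>

    static bool plausible_frame_size(long frame_size) {
      return frame_size > 0;   // the old check only rejected == 0
    }

    int main() {
      std::printf("%d %d %d\n",
                  plausible_frame_size(16),   // 1: plausible
                  plausible_frame_size(0),    // 0: rejected by old and new check
                  plausible_frame_size(-8));  // 0: rejected only by the new check
      return 0;
    }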
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp" #include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp" #include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp" #include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp" #include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp" #include "runtime/stubRoutines.hpp"
@ -54,16 +55,22 @@ bool frame::safe_for_sender(JavaThread *thread) {
address sp = (address)_sp; address sp = (address)_sp;
address fp = (address)_fp; address fp = (address)_fp;
address unextended_sp = (address)_unextended_sp; address unextended_sp = (address)_unextended_sp;
// sp must be within the stack
bool sp_safe = (sp <= thread->stack_base()) && // consider stack guards when trying to determine "safe" stack pointers
(sp >= thread->stack_base() - thread->stack_size()); static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
size_t usable_stack_size = thread->stack_size() - stack_guard_size;
// sp must be within the usable part of the stack (not in guards)
bool sp_safe = (sp < thread->stack_base()) &&
(sp >= thread->stack_base() - usable_stack_size);
if (!sp_safe) { if (!sp_safe) {
return false; return false;
} }
// unextended sp must be within the stack and above or equal sp // unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) && bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
(unextended_sp >= sp); (unextended_sp >= sp);
if (!unextended_sp_safe) { if (!unextended_sp_safe) {
@ -71,7 +78,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
// an fp must be within the stack and above (but not equal) sp // an fp must be within the stack and above (but not equal) sp
bool fp_safe = (fp <= thread->stack_base()) && (fp > sp); // second evaluation on fp+ is added to handle situation where fp is -1
bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));
// We know sp/unextended_sp are safe only fp is questionable here // We know sp/unextended_sp are safe only fp is questionable here
@ -86,6 +94,13 @@ bool frame::safe_for_sender(JavaThread *thread) {
// other generic buffer blobs are more problematic so we just assume they are // other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok. // ok. adapter blobs never have a frame complete and are never ok.
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
//assert(0, "Invalid frame_size");
return false;
}
if (!_cb->is_frame_complete_at(_pc)) { if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) { if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false; return false;
@ -107,7 +122,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
address jcw = (address)entry_frame_call_wrapper(); address jcw = (address)entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp); bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
return jcw_safe; return jcw_safe;
@ -134,12 +149,6 @@ bool frame::safe_for_sender(JavaThread *thread) {
sender_pc = (address) *(sender_sp-1); sender_pc = (address) *(sender_sp-1);
} }
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// If the potential sender is the interpreter then we can do some more checking // If the potential sender is the interpreter then we can do some more checking
if (Interpreter::contains(sender_pc)) { if (Interpreter::contains(sender_pc)) {
@ -149,7 +158,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// is really a frame pointer. // is really a frame pointer.
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) { if (!saved_fp_safe) {
return false; return false;
@ -163,6 +172,17 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// Could be a zombie method
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
return false;
}
// Could just be some random pointer within the codeBlob // Could just be some random pointer within the codeBlob
if (!sender_blob->code_contains(sender_pc)) { if (!sender_blob->code_contains(sender_pc)) {
return false; return false;
@ -174,10 +194,9 @@ bool frame::safe_for_sender(JavaThread *thread) {
} }
// Could be the call_stub // Could be the call_stub
if (StubRoutines::returns_to_call_stub(sender_pc)) { if (StubRoutines::returns_to_call_stub(sender_pc)) {
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset); intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp); bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) { if (!saved_fp_safe) {
return false; return false;
@ -190,15 +209,24 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Validate the JavaCallWrapper an entry frame must have // Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper(); address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp()); bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
return jcw_safe; return jcw_safe;
} }
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
return false;
}
}
}
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
// because the return address counts against the callee's frame. // because the return address counts against the callee's frame.
if (sender_blob->frame_size() == 0) { if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least"); assert(!sender_blob->is_nmethod(), "should count return address at least");
return false; return false;
} }
@ -208,7 +236,9 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered) // should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod. // or an nmethod.
assert(sender_blob->is_nmethod(), "Impossible call chain"); if (!sender_blob->is_nmethod()) {
return false;
}
// Could put some more validation for the potential non-interpreted sender // Could put some more validation for the potential non-interpreted sender
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte... // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -94,7 +94,7 @@ public:
// flags that support signal based suspend/resume on Bsd are in a // flags that support signal based suspend/resume on Bsd are in a
// separate class to avoid confusion with many flags in OSThread that // separate class to avoid confusion with many flags in OSThread that
// are used by VM level suspend/resume. // are used by VM level suspend/resume.
os::Bsd::SuspendResume sr; os::SuspendResume sr;
// _ucontext and _siginfo are used by SR_handler() to save thread context, // _ucontext and _siginfo are used by SR_handler() to save thread context,
// and they will later be used to walk the stack or reposition thread PC. // and they will later be used to walk the stack or reposition thread PC.
View File
@ -1852,17 +1852,118 @@ static volatile jint pending_signals[NSIG+1] = { 0 };
// Bsd(POSIX) specific hand shaking semaphore. // Bsd(POSIX) specific hand shaking semaphore.
#ifdef __APPLE__ #ifdef __APPLE__
static semaphore_t sig_sem; typedef semaphore_t os_semaphore_t;
#define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value) #define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
#define SEM_WAIT(sem) semaphore_wait(sem); #define SEM_WAIT(sem) semaphore_wait(sem)
#define SEM_POST(sem) semaphore_signal(sem); #define SEM_POST(sem) semaphore_signal(sem)
#define SEM_DESTROY(sem) semaphore_destroy(mach_task_self(), sem)
#else #else
static sem_t sig_sem; typedef sem_t os_semaphore_t;
#define SEM_INIT(sem, value) sem_init(&sem, 0, value) #define SEM_INIT(sem, value) sem_init(&sem, 0, value)
#define SEM_WAIT(sem) sem_wait(&sem); #define SEM_WAIT(sem) sem_wait(&sem)
#define SEM_POST(sem) sem_post(&sem); #define SEM_POST(sem) sem_post(&sem)
#define SEM_DESTROY(sem) sem_destroy(&sem)
#endif #endif
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
jlong currenttime() const;
os_semaphore_t _semaphore;
};
Semaphore::Semaphore() : _semaphore(0) {
SEM_INIT(_semaphore, 0);
}
Semaphore::~Semaphore() {
SEM_DESTROY(_semaphore);
}
void Semaphore::signal() {
SEM_POST(_semaphore);
}
void Semaphore::wait() {
SEM_WAIT(_semaphore);
}
jlong Semaphore::currenttime() const {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
}
#ifdef __APPLE__
bool Semaphore::trywait() {
return timedwait(0, 0);
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
kern_return_t kr = KERN_ABORTED;
mach_timespec_t waitspec;
waitspec.tv_sec = sec;
waitspec.tv_nsec = nsec;
jlong starttime = currenttime();
kr = semaphore_timedwait(_semaphore, waitspec);
while (kr == KERN_ABORTED) {
jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec;
jlong current = currenttime();
jlong passedtime = current - starttime;
if (passedtime >= totalwait) {
waitspec.tv_sec = 0;
waitspec.tv_nsec = 0;
} else {
jlong waittime = totalwait - (current - starttime);
waitspec.tv_sec = waittime / NANOSECS_PER_SEC;
waitspec.tv_nsec = waittime % NANOSECS_PER_SEC;
}
kr = semaphore_timedwait(_semaphore, waitspec);
}
return kr == KERN_SUCCESS;
}
#else
bool Semaphore::trywait() {
return sem_trywait(&_semaphore) == 0;
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
if (result == 0) {
return true;
} else if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT) {
return false;
} else {
return false;
}
}
}
#endif // __APPLE__
static os_semaphore_t sig_sem;
static Semaphore sr_semaphore;
void os::signal_init_pd() { void os::signal_init_pd() {
// Initialize signal structures // Initialize signal structures
::memset((void*)pending_signals, 0, sizeof(pending_signals)); ::memset((void*)pending_signals, 0, sizeof(pending_signals));
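sr_semaphore is what turns the old yield_all() polling into a real handshake: do_suspend() blocks on it in 2 ms slices and SR_handler() signals it from the target thread. A small usage sketch of the Semaphore wrapper defined above — the scenario is assumed, not patch code:

    // Uses the Semaphore class from the hunk above; the scenario is assumed.
    static Semaphore handshake;

    void target_side() {
      // ... reach a known state, then wake the requester:
      handshake.signal();   // SEM_POST underneath
    }

    bool requester_side() {
      // poll in 2 ms slices, mirroring do_suspend()'s use of sr_semaphore
      return handshake.timedwait(0, 2 * 1000 * 1000 /* 2 ms in ns */);
    }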
@ -2616,9 +2717,6 @@ void os::hint_no_preempt() {}
static void resume_clear_context(OSThread *osthread) { static void resume_clear_context(OSThread *osthread) {
osthread->set_ucontext(NULL); osthread->set_ucontext(NULL);
osthread->set_siginfo(NULL); osthread->set_siginfo(NULL);
// notify the suspend action is completed, we have now resumed
osthread->sr.clear_suspended();
} }
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@ -2638,7 +2736,7 @@ static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontex
// its signal handlers run and prevents sigwait()'s use with the // its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal. // mutex granting signal.
// //
// Currently only ever called on the VMThread // Currently only ever called on the VMThread or JavaThread
// //
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// Save and restore errno to avoid confusing native code with EINTR // Save and restore errno to avoid confusing native code with EINTR
@ -2647,38 +2745,48 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
Thread* thread = Thread::current(); Thread* thread = Thread::current();
OSThread* osthread = thread->osthread(); OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread(), "Must be VMThread"); assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
// read current suspend action
int action = osthread->sr.suspend_action(); os::SuspendResume::State current = osthread->sr.state();
if (action == os::Bsd::SuspendResume::SR_SUSPEND) { if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
suspend_save_context(osthread, siginfo, context); suspend_save_context(osthread, siginfo, context);
// Notify the suspend action is about to be completed. do_suspend() // attempt to switch the state, we assume we had a SUSPEND_REQUEST
// waits until SR_SUSPENDED is set and then returns. We will wait os::SuspendResume::State state = osthread->sr.suspended();
// here for a resume signal and that completes the suspend-other if (state == os::SuspendResume::SR_SUSPENDED) {
// action. do_suspend/do_resume is always called as a pair from sigset_t suspend_set; // signals for sigsuspend()
// the same thread - so there are no races
// notify the caller // get current set of blocked signals and unblock resume signal
osthread->sr.set_suspended(); pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
sigset_t suspend_set; // signals for sigsuspend() sr_semaphore.signal();
// wait here until we are resumed
while (1) {
sigsuspend(&suspend_set);
// get current set of blocked signals and unblock resume signal os::SuspendResume::State result = osthread->sr.running();
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); if (result == os::SuspendResume::SR_RUNNING) {
sigdelset(&suspend_set, SR_signum); sr_semaphore.signal();
break;
} else if (result != os::SuspendResume::SR_SUSPENDED) {
ShouldNotReachHere();
}
}
// wait here until we are resumed } else if (state == os::SuspendResume::SR_RUNNING) {
do { // request was cancelled, continue
sigsuspend(&suspend_set); } else {
// ignore all returns until we get a resume signal ShouldNotReachHere();
} while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE); }
resume_clear_context(osthread); resume_clear_context(osthread);
} else if (current == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
// ignore
} else { } else {
assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action"); // ignore
// nothing special to do - just leave the handler
} }
errno = old_errno; errno = old_errno;
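The handler logic above is driven by the shared os::SuspendResume state machine (the per-OS os::Bsd::SuspendResume flag class it replaces is deleted in osThread_bsd.hpp earlier in this changeset). A compressed sketch of its transitions — the real code flips states with an atomic cmpxchg, for which a plain compare stands in here:

    // Sketch only: transition helpers named as in the patch; not thread-safe
    // as written (the real switch_state() uses an atomic compare-and-swap).
    enum SRState { SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST };

    struct SuspendResumeSketch {
      volatile SRState state = SR_RUNNING;

      SRState switch_state(SRState from, SRState to) {
        if (state == from) state = to;  // Atomic::cmpxchg in the real code
        return state;                   // caller inspects what actually happened
      }
      SRState request_suspend() { return switch_state(SR_RUNNING,         SR_SUSPEND_REQUEST); }
      SRState suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
      SRState cancel_suspend()  { return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); }
      SRState request_wakeup()  { return switch_state(SR_SUSPENDED,       SR_WAKEUP_REQUEST); }
      SRState running()         { return switch_state(SR_WAKEUP_REQUEST,  SR_RUNNING); }
    };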
@ -2722,42 +2830,82 @@ static int SR_initialize() {
  return 0;
}

static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  // mark as suspended and send signal
  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND);
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");

  // check status and wait until notified of suspension
  if (status == 0) {
    for (int i = 0; !osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }
    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
    return true;
  } else {
    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
    return false;
  }

  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");

  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE);
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  // check status and wait until notified of resumption
  if (status == 0) {
    for (int i = 0; osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }
  }
  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);

  assert(!sr_semaphore.trywait(), "invalid semaphore state");
  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
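The new protocol leans entirely on the atomic transitions of os::SuspendResume (request_suspend(), suspended(), cancel_suspend(), request_wakeup(), running()). As a rough illustration of the state machine those names imply — a compilable sketch, not the HotSpot class — each transition is a single compare-and-swap that either succeeds or reports the state that got there first:

#include <atomic>

// Hypothetical stand-in for os::SuspendResume: every transition is one CAS,
// so a timed-out suspender and the signal handler can race safely.
class SuspendResumeSketch {
 public:
  enum State { SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED, SR_WAKEUP_REQUEST };

  // suspender:       RUNNING         -> SUSPEND_REQUEST
  State request_suspend() { return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); }
  // signal handler:  SUSPEND_REQUEST -> SUSPENDED
  State suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
  // suspender gave up waiting: SUSPEND_REQUEST -> RUNNING (unless handler won)
  State cancel_suspend()  { return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); }
  // resumer:         SUSPENDED       -> WAKEUP_REQUEST
  State request_wakeup()  { return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); }
  // signal handler:  WAKEUP_REQUEST  -> RUNNING
  State running()         { return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); }

  bool is_running()   const { return _state.load() == SR_RUNNING; }
  bool is_suspended() const { return _state.load() == SR_SUSPENDED; }

 private:
  // Try from -> to; on failure return the state actually observed.
  State switch_state(State from, State to) {
    State expected = from;
    if (_state.compare_exchange_strong(expected, to)) return to;
    return expected;
  }
  std::atomic<State> _state{SR_RUNNING};
};

Under this scheme the timeout path of do_suspend() is race-free: cancel_suspend() and the handler's suspended() contend on the same CAS, so exactly one of them wins — and the SR_SUSPENDED branch of the loser is why the code above still consumes the semaphore signal after a lost cancellation.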
////////////////////////////////////////////////////////////////////////////////

@ -3508,7 +3656,40 @@ bool os::bind_to_processor(uint processor_id) {
  return false;
}
void os::SuspendedThreadTask::internal_do_task() {
if (do_suspend(_thread->osthread())) {
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
do_task(context);
do_resume(_thread->osthread());
}
}
///
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
ExtendedPC PcFetcher::result() {
guarantee(is_done(), "task is not done yet.");
return _epc;
}
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
Thread* thread = context.thread();
OSThread* osthread = thread->osthread();
if (osthread->ucontext() != NULL) {
_epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
}
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only

@ -3517,22 +3698,9 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  ExtendedPC epc;

  OSThread* osthread = thread->osthread();
  if (do_suspend(osthread)) {
    if (osthread->ucontext() != NULL) {
      epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
    } else {
      // NULL context is unexpected, double-check this is the VMThread
      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
    }
    do_resume(osthread);
  }
  // failure means pthread_kill failed for some reason - arguably this is
  // a fatal problem, but such problems are ignored elsewhere
  return epc;

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)

@ -4517,3 +4685,4 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
  return n;
}
View File
@ -145,36 +145,6 @@ class Bsd {
  // BsdThreads work-around for 6292965
  static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
// Bsd suspend/resume support - this helper is a shadow of its former
// self now that low-level suspension is barely used, and old workarounds
// for BsdThreads are no longer needed.
class SuspendResume {
private:
volatile int _suspend_action;
volatile jint _state;
public:
// values for suspend_action:
enum {
SR_NONE = 0x00,
SR_SUSPEND = 0x01, // suspend request
SR_CONTINUE = 0x02, // resume request
SR_SUSPENDED = 0x20 // values for _state: + SR_NONE
};
SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
int suspend_action() const { return _suspend_action; }
void set_suspend_action(int x) { _suspend_action = x; }
// atomic updates for _state
inline void set_suspended();
inline void clear_suspended();
bool is_suspended() { return _state & SR_SUSPENDED; }
#undef SR_SUSPENDED
};
private:
  typedef int (*sched_getcpu_func_t)(void);
  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@ -250,7 +220,7 @@ class PlatformEvent : public CHeapObj<mtInternal> {
  int TryPark () ;
  int park (jlong millis) ;
  void SetAssociation (Thread * a) { _Assoc = a ; }
} ;
};

class PlatformParker : public CHeapObj<mtInternal> {
 protected:
@ -268,6 +238,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
    status = pthread_mutex_init (_mutex, NULL);
    assert_status(status == 0, status, "mutex_init");
  }
} ;
};

#endif // OS_BSD_VM_OS_BSD_HPP
View File
@ -286,20 +286,4 @@ inline int os::set_sock_opt(int fd, int level, int optname,
  return ::setsockopt(fd, level, optname, optval, optlen);
}
inline void os::Bsd::SuspendResume::set_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
inline void os::Bsd::SuspendResume::clear_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
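Both removed helpers are hand-rolled compare-and-swap retry loops that set or clear a single bit in _state. In post-C++11 code the same effect is a one-line atomic read-modify-write — a sketch with std::atomic standing in for the Atomic:: wrappers:

#include <atomic>
#include <cstdint>

enum { SR_SUSPENDED = 0x20 };

static std::atomic<int32_t> _state{0};

// equivalent of set_suspended(): atomically OR the bit in
inline void set_suspended()   { _state.fetch_or(SR_SUSPENDED); }

// equivalent of clear_suspended(): atomically AND the bit out
inline void clear_suspended() { _state.fetch_and(~SR_SUSPENDED); }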
#endif // OS_BSD_VM_OS_BSD_INLINE_HPP
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ public:
  // flags that support signal based suspend/resume on Linux are in a
  // separate class to avoid confusion with many flags in OSThread that
  // are used by VM level suspend/resume.
  os::Linux::SuspendResume sr;
  os::SuspendResume sr;

  // _ucontext and _siginfo are used by SR_handler() to save thread context,
  // and they will later be used to walk the stack or reposition thread PC.
View File
@ -151,6 +151,9 @@ sigset_t SR_sigset;
/* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex;
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
#ifdef JAVASE_EMBEDDED
class MemNotifyThread: public Thread {
  friend class VMStructs;
@ -2407,6 +2410,57 @@ void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
sem_t _semaphore;
};
Semaphore::Semaphore() {
sem_init(&_semaphore, 0, 0);
}
Semaphore::~Semaphore() {
sem_destroy(&_semaphore);
}
void Semaphore::signal() {
sem_post(&_semaphore);
}
void Semaphore::wait() {
sem_wait(&_semaphore);
}
bool Semaphore::trywait() {
return sem_trywait(&_semaphore) == 0;
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
if (result == 0) {
return true;
} else if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT) {
return false;
} else {
return false;
}
}
}
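Note that sem_timedwait() takes an absolute CLOCK_REALTIME deadline, which is why timedwait() first converts its relative (sec, nsec) pair via unpackTime() and only then loops on EINTR. A self-contained sketch of the same pattern, assuming Linux and plain POSIX (names are illustrative, not HotSpot's):

#include <semaphore.h>
#include <time.h>
#include <errno.h>

// Wait on `sem` for at most the given relative timeout.
// Returns true if the semaphore was acquired, false on timeout or error.
static bool timed_wait(sem_t* sem, unsigned int sec, long nsec) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);   // sem_timedwait wants absolute time
  deadline.tv_sec  += sec;
  deadline.tv_nsec += nsec;
  if (deadline.tv_nsec >= 1000000000L) {      // normalize the nanosecond carry
    deadline.tv_sec  += deadline.tv_nsec / 1000000000L;
    deadline.tv_nsec %= 1000000000L;
  }
  while (sem_timedwait(sem, &deadline) == -1) {
    if (errno == EINTR) continue;             // retried exactly like the wrapper above
    return false;                             // ETIMEDOUT or a real error
  }
  return true;
}

Like the wrapper, the sketch folds genuine errors into the timeout result; the callers above treat both identically.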
extern "C" { extern "C" {
typedef void (*sa_handler_t)(int); typedef void (*sa_handler_t)(int);
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
@ -2446,6 +2500,7 @@ static volatile jint pending_signals[NSIG+1] = { 0 };
// Linux(POSIX) specific hand shaking semaphore.
static sem_t sig_sem;
static Semaphore sr_semaphore;
void os::signal_init_pd() {
  // Initialize signal structures
@ -3559,9 +3614,6 @@ void os::hint_no_preempt() {}
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);

  // notify the suspend action is completed, we have now resumed
  osthread->sr.clear_suspended();
}

static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@ -3581,7 +3633,7 @@ static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontex
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting ownership signal.
//
// Currently only ever called on the VMThread
// Currently only ever called on the VMThread and JavaThreads (PC sampling)
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
@ -3590,38 +3642,46 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();

  assert(thread->is_VM_thread(), "Must be VMThread");
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  // read current suspend action
  int action = osthread->sr.suspend_action();
  if (action == os::Linux::SuspendResume::SR_SUSPEND) {
    suspend_save_context(osthread, siginfo, context);

    // Notify the suspend action is about to be completed. do_suspend()
    // waits until SR_SUSPENDED is set and then returns. We will wait
    // here for a resume signal and that completes the suspend-other
    // action. do_suspend/do_resume is always called as a pair from
    // the same thread - so there are no races

    // notify the caller
    osthread->sr.set_suspended();

    sigset_t suspend_set;  // signals for sigsuspend()

    // get current set of blocked signals and unblock resume signal
    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
    sigdelset(&suspend_set, SR_signum);

    // wait here until we are resumed
    do {
      sigsuspend(&suspend_set);
      // ignore all returns until we get a resume signal
    } while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE);

    resume_clear_context(osthread);
  } else {
    assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action");
    // nothing special to do - just leave the handler
  }

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }
    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
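The handler's wait loop is the classic POSIX idiom: take the current blocked-signal mask, delete just the resume signal from it, and hand the result to sigsuspend(), which atomically unblocks that one signal and sleeps. A free-standing sketch of the idiom, with SIGUSR1 standing in for SR_signum (nothing here is HotSpot code):

#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static volatile sig_atomic_t resumed = 0;

static void on_resume(int) { resumed = 1; }   // async-signal-safe: only sets a flag

int main() {
  // Block SIGUSR1 so it can only be delivered inside sigsuspend().
  sigset_t block;
  sigemptyset(&block);
  sigaddset(&block, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &block, NULL);

  struct sigaction sa = {};
  sa.sa_handler = on_resume;
  sigaction(SIGUSR1, &sa, NULL);

  kill(getpid(), SIGUSR1);                    // queue the "resume" signal; it stays pending

  // Take the current mask and carve SIGUSR1 out of it, as SR_handler does.
  sigset_t wait_mask;
  pthread_sigmask(SIG_BLOCK, NULL, &wait_mask);
  sigdelset(&wait_mask, SIGUSR1);

  while (!resumed) {
    sigsuspend(&wait_mask);                   // atomically unblock + sleep
  }
  printf("resumed\n");
  return 0;
}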
@ -3665,42 +3725,82 @@ static int SR_initialize() {
  return 0;
}
static int sr_notify(OSThread* osthread) {
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
return status;
}
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  // mark as suspended and send signal
  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND);
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");

  // check status and wait until notified of suspension
  if (status == 0) {
    for (int i = 0; !osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }
    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
    return true;
  } else {
    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
    return false;
  }

  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");

  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE);
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  // check status and wait until notified of resumption
  if (status == 0) {
    for (int i = 0; osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }
  }
  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);

  assert(!sr_semaphore.trywait(), "invalid semaphore state");
  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
////////////////////////////////////////////////////////////////////////////////

@ -4472,6 +4572,40 @@ bool os::bind_to_processor(uint processor_id) {

///
void os::SuspendedThreadTask::internal_do_task() {
if (do_suspend(_thread->osthread())) {
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
do_task(context);
do_resume(_thread->osthread());
}
}
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
ExtendedPC PcFetcher::result() {
guarantee(is_done(), "task is not done yet.");
return _epc;
}
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
Thread* thread = context.thread();
OSThread* osthread = thread->osthread();
if (osthread->ucontext() != NULL) {
_epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
}
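PcFetcher is a textbook template-method use of os::SuspendedThreadTask: run() owns the suspend/operate/resume choreography and subclasses only supply do_task(). A stripped-down sketch of that shape (hypothetical names; the suspension machinery is stubbed out so the example stays self-contained):

#include <cstdio>

// Sketch of the template-method pattern used by PcFetcher.
class SuspendedTaskSketch {
 public:
  virtual ~SuspendedTaskSketch() = default;
  void run() {
    // real code: do_suspend(thread), build a context from the ucontext, resume
    int fake_context = 42;
    do_task(fake_context);
    _done = true;
  }
  bool is_done() const { return _done; }
 protected:
  virtual void do_task(int context) = 0;   // subclass hook, runs while "suspended"
 private:
  bool _done = false;
};

class PcFetcherSketch : public SuspendedTaskSketch {
 public:
  int result() const { return _pc; }       // only meaningful once is_done()
 protected:
  void do_task(int context) override { _pc = context; }
 private:
  int _pc = 0;
};

int main() {
  PcFetcherSketch fetcher;
  fetcher.run();
  printf("pc = %d\n", fetcher.result());
  return 0;
}

The guarantee(is_done(), ...) in PcFetcher::result() enforces the same ordering the sketch implies: result() is invalid until run() has completed the whole suspend/task/resume cycle.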
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {

@ -4479,22 +4613,9 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  ExtendedPC epc;

  OSThread* osthread = thread->osthread();
  if (do_suspend(osthread)) {
    if (osthread->ucontext() != NULL) {
      epc = os::Linux::ucontext_get_pc(osthread->ucontext());
    } else {
      // NULL context is unexpected, double-check this is the VMThread
      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
    }
    do_resume(osthread);
  }
  // failure means pthread_kill failed for some reason - arguably this is
  // a fatal problem, but such problems are ignored elsewhere
  return epc;

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)

@ -5616,4 +5737,5 @@ void MemNotifyThread::start() {
    new MemNotifyThread(fd);
  }
}

#endif // JAVASE_EMBEDDED
View File
@ -210,35 +210,6 @@ class Linux {
  // LinuxThreads work-around for 6292965
  static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
// Linux suspend/resume support - this helper is a shadow of its former
// self now that low-level suspension is barely used, and old workarounds
// for LinuxThreads are no longer needed.
class SuspendResume {
private:
volatile int _suspend_action;
volatile jint _state;
public:
// values for suspend_action:
enum {
SR_NONE = 0x00,
SR_SUSPEND = 0x01, // suspend request
SR_CONTINUE = 0x02, // resume request
SR_SUSPENDED = 0x20 // values for _state: + SR_NONE
};
SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
int suspend_action() const { return _suspend_action; }
void set_suspend_action(int x) { _suspend_action = x; }
// atomic updates for _state
inline void set_suspended();
inline void clear_suspended();
bool is_suspended() { return _state & SR_SUSPENDED; }
};
private:
  typedef int (*sched_getcpu_func_t)(void);
  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@ -333,6 +304,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
    status = pthread_mutex_init (_mutex, NULL);
    assert_status(status == 0, status, "mutex_init");
  }
} ;
};

#endif // OS_LINUX_VM_OS_LINUX_HPP
View File
@ -288,20 +288,4 @@ inline int os::set_sock_opt(int fd, int level, int optname,
  return ::setsockopt(fd, level, optname, optval, optlen);
}
inline void os::Linux::SuspendResume::set_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
inline void os::Linux::SuspendResume::clear_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
#endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -41,10 +41,6 @@ void OSThread::pd_initialize() {
  _thread_id = 0;
  sigemptyset(&_caller_sigmask);

  _current_callback = NULL;
  _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
                           : new Mutex(Mutex::suspend_resume, "Callback_lock", true);

  _saved_interrupt_thread_state = _thread_new;
  _vm_created_thread = false;
}
@ -52,172 +48,6 @@ void OSThread::pd_initialize() {
void OSThread::pd_destroy() {
}
// Synchronous interrupt support
//
// _current_callback == NULL no pending callback
// == 1 callback_in_progress
// == other value pointer to the pending callback
//
// CAS on v8 is implemented by using a global atomic_memory_operation_lock,
// which is shared by other atomic functions. It is OK for normal uses, but
// dangerous if used after some thread is suspended or if used in signal
// handlers. Instead here we use a special per-thread lock to synchronize
// updating _current_callback if we are running on v8. Note in general trying
// to grab locks after a thread is suspended is not safe, but it is safe for
// updating _current_callback, because synchronous interrupt callbacks are
// currently only used in:
// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
// There is no overlap between the callbacks, which means we won't try to
// grab a thread's sync lock after the thread has been suspended while holding
// the same lock.
// used after a thread is suspended
static intptr_t compare_and_exchange_current_callback (
intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
if (VM_Version::supports_compare_and_exchange()) {
return Atomic::cmpxchg_ptr(callback, addr, compare_value);
} else {
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
if (*addr == compare_value) {
*addr = callback;
return compare_value;
} else {
return callback;
}
}
}
// used in signal handler
static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
if (VM_Version::supports_compare_and_exchange()) {
return Atomic::xchg_ptr(callback, addr);
} else {
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
intptr_t cb = *addr;
*addr = callback;
return cb;
}
}
// one interrupt at a time. spin if _current_callback != NULL
int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
int count = 0;
while (compare_and_exchange_current_callback(
(intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
while (_current_callback != NULL) {
count++;
#ifdef ASSERT
if ((WarnOnStalledSpinLock > 0) &&
(count % WarnOnStalledSpinLock == 0)) {
warning("_current_callback seems to be stalled: %p", _current_callback);
}
#endif
os::yield_all(count);
}
}
return 0;
}
// reset _current_callback, spin if _current_callback is callback_in_progress
void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
int count = 0;
while (compare_and_exchange_current_callback(
(intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
#ifdef ASSERT
intptr_t p = (intptr_t)_current_callback;
assert(p == (intptr_t)callback_in_progress ||
p == (intptr_t)cb, "wrong _current_callback value");
#endif
while (_current_callback != cb) {
count++;
#ifdef ASSERT
if ((WarnOnStalledSpinLock > 0) &&
(count % WarnOnStalledSpinLock == 0)) {
warning("_current_callback seems to be stalled: %p", _current_callback);
}
#endif
os::yield_all(count);
}
}
}
void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
Sync_Interrupt_Callback * cb;
cb = (Sync_Interrupt_Callback *)exchange_current_callback(
(intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);
if (cb == NULL) {
// signal is delivered too late (thread is masking interrupt signal??).
// there is nothing we need to do because requesting thread has given up.
} else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
fatal("invalid _current_callback state");
} else {
assert(cb->target()->osthread() == this, "wrong target");
cb->execute(args);
cb->leave_callback(); // notify the requester
}
// restore original _current_callback value
intptr_t p;
p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
assert(p == (intptr_t)callback_in_progress, "just checking");
}
// Called by the requesting thread to send a signal to target thread and
// execute "this" callback from the signal handler.
int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
// Let signals to the vm_thread go even if the Threads_lock is not acquired
assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
"must have threads lock to call this");
OSThread * osthread = target->osthread();
// may block if target thread already has a pending callback
osthread->set_interrupt_callback(this);
_target = target;
int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
assert(rslt == 0, "thr_kill != 0");
bool status = false;
jlong t1 = os::javaTimeMillis();
{ // don't use safepoint check because we might be the watcher thread.
MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
while (!is_done()) {
status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);
// status == true if timed out
if (status) break;
// update timeout
jlong t2 = os::javaTimeMillis();
timeout -= t2 - t1;
t1 = t2;
}
}
// reset current_callback
osthread->remove_interrupt_callback(this);
return status;
}
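The removed interrupt() also illustrates the standard way to honor one overall timeout across spurious monitor wakeups: re-read the clock after every wait and shrink the remaining budget. Modern C++ usually phrases the same loop against a fixed absolute deadline instead — a sketch, unrelated to HotSpot's Monitor class:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Wait until `done` becomes true or `timeout_ms` elapses.
// Returns true on timeout, mirroring the removed code's convention.
static bool wait_with_timeout(std::mutex& m, std::condition_variable& cv,
                              bool& done, long timeout_ms) {
  auto deadline = std::chrono::steady_clock::now() +
                  std::chrono::milliseconds(timeout_ms);
  std::unique_lock<std::mutex> lock(m);
  while (!done) {
    // wait_until re-checks the clock internally; no manual t2 - t1 bookkeeping
    if (cv.wait_until(lock, deadline) == std::cv_status::timeout) {
      return true;   // timed out
    }
  }
  return false;      // completed in time
}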
void OSThread::Sync_Interrupt_Callback::leave_callback() {
if (!_sync->owned_by_self()) {
// notify requesting thread
MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
_is_done = true;
_sync->notify_all();
} else {
// Current thread is interrupted while it is holding the _sync lock, trying
// to grab it again will deadlock. The requester will timeout anyway,
// so just return.
_is_done = true;
}
}
// copied from synchronizer.cpp
void OSThread::handle_spinlock_contention(int tries) {
@ -229,3 +59,7 @@ void OSThread::handle_spinlock_contention(int tries) {
    os::yield();   // Yield to threads of same or higher priority
  }
}
void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
os::Solaris::SR_handler(thread, uc);
}
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -72,61 +72,15 @@
 // ***************************************************************
 public:
os::SuspendResume sr;
class InterruptArguments : StackObj {
private:
Thread* _thread; // the thread to signal was dispatched to
ucontext_t* _ucontext; // the machine context at the time of the signal
public:
InterruptArguments(Thread* thread, ucontext_t* ucontext) {
_thread = thread;
_ucontext = ucontext;
}
Thread* thread() const { return _thread; }
ucontext_t* ucontext() const { return _ucontext; }
};
// There are currently no asynchronous callbacks - and we'd better not
// support them in the future either, as they need to be deallocated from
// the interrupt handler, which is not safe; they also require locks to
// protect the callback queue.
class Sync_Interrupt_Callback : private StackObj {
protected:
volatile bool _is_done;
Monitor* _sync;
Thread* _target;
public:
Sync_Interrupt_Callback(Monitor * sync) {
_is_done = false; _target = NULL; _sync = sync;
}
bool is_done() const { return _is_done; }
Thread* target() const { return _target; }
int interrupt(Thread * target, int timeout);
// override to implement the callback.
virtual void execute(InterruptArguments *args) = 0;
void leave_callback();
};
 private:
ucontext_t* _ucontext;
Sync_Interrupt_Callback * volatile _current_callback;
enum {
callback_in_progress = 1
};
Mutex * _current_callback_lock; // only used on v8
 public:
ucontext_t* ucontext() const { return _ucontext; }
  int set_interrupt_callback (Sync_Interrupt_Callback * cb);
  void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);

  void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
  static void SR_handler(Thread* thread, ucontext_t* uc);
 // ***************************************************************
 // java.lang.Thread.interrupt state.
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,28 +27,6 @@
// Defines the interfaces to Solaris operating systems that vary across platforms
// This is a simple callback that just fetches a PC for an interrupted thread.
// The thread need not be suspended and the fetched PC is just a hint.
// Returned PC and nPC are not necessarily consecutive.
// This one is currently used for profiling the VMThread ONLY!
// Must be synchronous
class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback {
private:
ExtendedPC _addr;
public:
GetThreadPC_Callback(Monitor *sync) :
OSThread::Sync_Interrupt_Callback(sync) { }
ExtendedPC addr() const { return _addr; }
void set_addr(ExtendedPC addr) { _addr = addr; }
void execute(OSThread::InterruptArguments *args);
};
// misc
extern "C" {
  void signalHandler(int, siginfo_t*, void*);
View File
@ -240,6 +240,8 @@ extern "C" {
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
@ -2580,6 +2582,57 @@ void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
sema_t _semaphore;
};
Semaphore::Semaphore() {
sema_init(&_semaphore, 0, NULL, NULL);
}
Semaphore::~Semaphore() {
sema_destroy(&_semaphore);
}
void Semaphore::signal() {
sema_post(&_semaphore);
}
void Semaphore::wait() {
sema_wait(&_semaphore);
}
bool Semaphore::trywait() {
return sema_trywait(&_semaphore) == 0;
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sema_timedwait(&_semaphore, &ts);
if (result == 0) {
return true;
} else if (errno == EINTR) {
continue;
} else if (errno == ETIME) {
return false;
} else {
return false;
}
}
}
extern "C" { extern "C" {
typedef void (*sa_handler_t)(int); typedef void (*sa_handler_t)(int);
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
@ -4164,6 +4217,68 @@ void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}
static void resume_clear_context(OSThread *osthread) {
osthread->set_ucontext(NULL);
}
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
osthread->set_ucontext(context);
}
static Semaphore sr_semaphore;
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
// Save and restore errno to avoid confusing native code with EINTR
// after sigsuspend.
int old_errno = errno;
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
os::SuspendResume::State current = osthread->sr.state();
if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
suspend_save_context(osthread, uc);
// attempt to switch the state, we assume we had a SUSPEND_REQUEST
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
// get current set of blocked signals and unblock resume signal
thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, os::Solaris::SIGasync());
sr_semaphore.signal();
// wait here until we are resumed
while (1) {
sigsuspend(&suspend_set);
os::SuspendResume::State result = osthread->sr.running();
if (result == os::SuspendResume::SR_RUNNING) {
sr_semaphore.signal();
break;
}
}
} else if (state == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else {
ShouldNotReachHere();
}
resume_clear_context(osthread);
} else if (current == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
// ignore
} else {
// ignore
}
errno = old_errno;
}
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
@ -4247,6 +4362,116 @@ int os::message_box(const char* title, const char* message) {
  return buf[0] == 'y' || buf[0] == 'Y';
}
static int sr_notify(OSThread* osthread) {
int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
assert_status(status == 0, status, "thr_kill");
return status;
}
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
static bool do_suspend(OSThread* osthread) {
assert(osthread->sr.is_running(), "thread should be running");
assert(!sr_semaphore.trywait(), "semaphore has invalid state");
// mark as suspended and send signal
if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
// failed to switch, state wasn't running?
ShouldNotReachHere();
return false;
}
if (sr_notify(osthread) != 0) {
ShouldNotReachHere();
}
// managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
while (true) {
if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
break;
} else {
// timeout
os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
if (cancelled == os::SuspendResume::SR_RUNNING) {
return false;
} else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
// make sure that we consume the signal on the semaphore as well
sr_semaphore.wait();
break;
} else {
ShouldNotReachHere();
return false;
}
}
}
guarantee(osthread->sr.is_suspended(), "Must be suspended");
return true;
}
static void do_resume(OSThread* osthread) {
assert(osthread->sr.is_suspended(), "thread should be suspended");
assert(!sr_semaphore.trywait(), "invalid semaphore state");
if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
// failed to switch to WAKEUP_REQUEST
ShouldNotReachHere();
return;
}
while (true) {
if (sr_notify(osthread) == 0) {
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
if (osthread->sr.is_running()) {
return;
}
}
} else {
ShouldNotReachHere();
}
}
guarantee(osthread->sr.is_running(), "Must be running!");
}
void os::SuspendedThreadTask::internal_do_task() {
if (do_suspend(_thread->osthread())) {
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
do_task(context);
do_resume(_thread->osthread());
}
}
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
ExtendedPC PcFetcher::result() {
guarantee(is_done(), "task is not done yet.");
return _epc;
}
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
Thread* thread = context.thread();
OSThread* osthread = thread->osthread();
if (osthread->ucontext() != NULL) {
_epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
}
// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
@ -4254,21 +4479,9 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  ExtendedPC epc;

  GetThreadPC_Callback  cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done()) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                             osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -127,7 +127,6 @@ class Solaris {
  static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; }
  static void set_SIGasync(int newsig) { _SIGasync = newsig; }

 public:
  // Large Page Support--ISM.
  static bool largepage_range(char* addr, size_t size);
@ -145,6 +144,7 @@ class Solaris {
  static intptr_t* ucontext_get_sp(ucontext_t* uc);
  // ucontext_get_fp() is only used by Solaris X86 (see note below)
  static intptr_t* ucontext_get_fp(ucontext_t* uc);
static address ucontext_get_pc(ucontext_t* uc);
  // For Analyzer Forte AsyncGetCallTrace profiling support:
  // Parameter ret_fp is only used by Solaris X86.
@ -157,6 +157,8 @@ class Solaris {
  static void hotspot_sigmask(Thread* thread);
// SR_handler
static void SR_handler(Thread* thread, ucontext_t* uc);
 protected:
  // Solaris-specific interface goes here
  static julong available_memory();
View File
@ -5048,6 +5048,71 @@ int os::set_sock_opt(int fd, int level, int optname,
  return ::setsockopt(fd, level, optname, optval, optlen);
}
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
if (h != NULL) {
if (SuspendThread(*h) != ~0) {
return true;
}
}
return false;
}
// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
if (h != NULL) {
ResumeThread(*h);
}
}
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
if (h != NULL) {
*h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
}
}
//
// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
CONTEXT ctxt;
HANDLE h = NULL;
// get context capable handle for thread
get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
// sanity
if (h == NULL || h == INVALID_HANDLE_VALUE) {
return;
}
// suspend the thread
if (do_suspend(&h)) {
ctxt.ContextFlags = sampling_context_flags;
// get thread context
GetThreadContext(h, &ctxt);
SuspendedThreadTaskContext context(_thread, &ctxt);
// pass context to Thread Sampling impl
do_task(context);
// resume thread
do_resume(&h);
}
// close handle
CloseHandle(h);
}
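On Windows the whole signal/semaphore dance disappears because the kernel can suspend an arbitrary thread and expose its register file directly. A stand-alone sketch of the same OpenThread / SuspendThread / GetThreadContext / ResumeThread sequence (plain Win32, error handling trimmed; not the HotSpot code above):

#include <windows.h>
#include <cstdint>
#include <cstdio>

static DWORD WINAPI busy(LPVOID) {            // a thread to sample
  for (volatile unsigned i = 0;; ++i) {}
  return 0;
}

int main() {
  DWORD tid = 0;
  HANDLE worker = CreateThread(NULL, 0, busy, NULL, 0, &tid);
  Sleep(10);                                  // let it start spinning

  HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
                        THREAD_QUERY_INFORMATION, FALSE, tid);
  if (h != NULL && SuspendThread(h) != (DWORD)-1) {
    CONTEXT ctx;
    ctx.ContextFlags = CONTEXT_FULL;          // request control/integer registers
    if (GetThreadContext(h, &ctx)) {
#if defined(_M_X64)
      printf("sampled rip = %p\n", (void*)(uintptr_t)ctx.Rip);
#else
      printf("sampled eip = %p\n", (void*)(uintptr_t)ctx.Eip);
#endif
    }
    ResumeThread(h);                          // resuming a running thread is a no-op
  }
  if (h != NULL) CloseHandle(h);
  TerminateThread(worker, 0);                 // demo teardown only; never do this in real code
  CloseHandle(worker);
  return 0;
}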
// Kernel32 API
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -30,10 +30,16 @@
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
                                                     void* ucontext, bool isInJava) {
  assert(Thread::current() == this, "caller must be current thread");
  assert(this->is_Java_thread(), "must be JavaThread");
  return pd_get_top_frame(fr_addr, ucontext, isInJava);
}

bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
  return pd_get_top_frame(fr_addr, ucontext, isInJava);
}

bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
  assert(this->is_Java_thread(), "must be JavaThread");
  JavaThread* jt = (JavaThread *)this;

  // If we have a last_Java_frame, then we should use it even if
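This refactoring, repeated per platform below, is the usual "two public entry points, one private worker" move: only the signal-handler path keeps its current-thread assertion, and both paths share pd_get_top_frame(). Schematically (a hypothetical class, not JavaThread):

#include <cassert>

// Sketch of the delegation pattern introduced above.
class FrameSourceSketch {
 public:
  bool top_frame_for_signal_handler(int* out) {
    // only this entry point may assume it runs on the interrupted thread
    assert(out != nullptr);
    return top_frame(out);
  }
  bool top_frame_for_profiling(int* out) {
    return top_frame(out);        // same worker, no extra precondition
  }
 private:
  bool top_frame(int* out) {      // the single shared implementation
    *out = 42;                    // stand-in for the real frame walk
    return true;
  }
};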
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,13 @@
  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
                                           bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
  // These routines are only used on cpu architectures that
  // have separate register stacks (Itanium).
  static bool register_stack_overflow() { return false; }
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -32,8 +32,15 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
                                                     void* ucontext, bool isInJava) {
  assert(Thread::current() == this, "caller must be current thread");
  assert(this->is_Java_thread(), "must be JavaThread");
  return pd_get_top_frame(fr_addr, ucontext, isInJava);
}

bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
  return pd_get_top_frame(fr_addr, ucontext, isInJava);
}

bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
  assert(this->is_Java_thread(), "must be JavaThread");
  JavaThread* jt = (JavaThread *)this;

  // If we have a last_Java_frame, then we should use it even if
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,11 @@
  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
                                           bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
  // These routines are only used on cpu architectures that
  // have separate register stacks (Itanium).
  static bool register_stack_overflow() { return false; }
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -194,6 +194,11 @@ intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
  return NULL;
}
address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
return (address) uc->uc_mcontext.gregs[REG_PC];
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
@ -265,22 +270,6 @@ frame os::current_frame() {
  }
}
void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
Thread* thread = args->thread();
ucontext_t* uc = args->ucontext();
intptr_t* sp;
assert(ProfileVM && thread->is_VM_thread(), "just checking");
  // Skip the mcontext corruption verification. If occasionally
  // things get corrupt, it is ok for profiling - we will just get an unresolved
  // function name
ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
_addr = new_addr;
}
static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
  char lwpstatusfile[PROCFILE_LENGTH];
  int lwpfd, err;
@ -358,13 +347,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

  if (sig == os::Solaris::SIGasync()) {
    if (thread) {
      OSThread::InterruptArguments args(thread, uc);
      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    } else if (vmthread) {
      OSThread::InterruptArguments args(vmthread, uc);
      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    }

    if (thread || vmthread) {
      OSThread::SR_handler(t, uc);
      return true;
    } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,21 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
                                                     void* ucontext, bool isInJava) {
  assert(Thread::current() == this, "caller must be current thread");
return pd_get_top_frame(fr_addr, ucontext, isInJava, true);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
// get ucontext somehow
return pd_get_top_frame(fr_addr, ucontext, isInJava, false);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr,
void* ucontext, bool isInJava, bool makeWalkable) {
  assert(this->is_Java_thread(), "must be JavaThread");
  JavaThread* jt = (JavaThread *)this;

  if (!isInJava) {
  if (!isInJava && makeWalkable) {
    // make_walkable flushes register windows and grabs last_Java_pc
    // which can not be done if the ucontext sp matches last_Java_sp
    // stack walking utilities assume last_Java_pc set if marked flushed
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -93,6 +93,11 @@ public:
  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
                                           bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava, bool makeWalkable);
public:
  // These routines are only used on cpu architectures that
  // have separate register stacks (Itanium).
  static bool register_stack_overflow() { return false; }
View File
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -183,6 +183,10 @@ intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}
address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
return (address) uc->uc_mcontext.gregs[REG_PC];
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
@ -252,22 +256,6 @@ frame os::current_frame() {
} }
} }
// This is a simple callback that just fetches a PC for an interrupted thread.
// The thread need not be suspended and the fetched PC is just a hint.
// This one is currently used for profiling the VMThread ONLY!
// Must be synchronous
void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
Thread* thread = args->thread();
ucontext_t* uc = args->ucontext();
intptr_t* sp;
assert(ProfileVM && thread->is_VM_thread(), "just checking");
ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
_addr = new_addr;
}
static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
char lwpstatusfile[PROCFILE_LENGTH]; char lwpstatusfile[PROCFILE_LENGTH];
int lwpfd, err; int lwpfd, err;
@ -419,14 +407,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
if (sig == os::Solaris::SIGasync()) { if (sig == os::Solaris::SIGasync()) {
if(thread){ if(thread || vmthread){
OSThread::InterruptArguments args(thread, uc); OSThread::SR_handler(t, uc);
thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
return true;
}
else if(vmthread){
OSThread::InterruptArguments args(vmthread, uc);
vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
return true; return true;
} else if (os::Solaris::chained_handler(sig, info, ucVoid)) { } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
return true; return true;
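The SIGasync hunk above folds the two per-thread branches into one: instead of building OSThread::InterruptArguments and invoking do_interrupt_callbacks_at_interrupt() separately for JavaThreads and the VMThread, both cases now route through the shared suspend/resume handler. Condensed, the new dispatch is:

  // Sketch: t is the Thread* the signal was delivered on, uc its ucontext_t*.
  if (sig == os::Solaris::SIGasync()) {
    if (thread != NULL || vmthread != NULL) {
      OSThread::SR_handler(t, uc);  // one path for Java and VM threads alike
      return true;
    }
    // chained handlers checked afterwards, as before
  }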
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,8 +30,17 @@
// currently interrupted by SIGPROF // currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) { void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread"); assert(Thread::current() == this, "caller must be current thread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr,
void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr,
void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread"); assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this; JavaThread* jt = (JavaThread *)this;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,12 @@
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava); bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext,
bool isInJava);
public:
// These routines are only used on cpu architectures that // These routines are only used on cpu architectures that
// have separate register stacks (Itanium). // have separate register stacks (Itanium).
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,15 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) { void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread"); assert(Thread::current() == this, "caller must be current thread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread"); assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this; JavaThread* jt = (JavaThread *)this;
@ -87,4 +96,3 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
} }
void JavaThread::cache_global_variables() { } void JavaThread::cache_global_variables() { }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -58,6 +58,12 @@
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava); bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
// These routines are only used on cpu architectures that // These routines are only used on cpu architectures that
// have separate register stacks (Itanium). // have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; } static bool register_stack_overflow() { return false; }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -152,7 +152,7 @@ class BuildConfig {
sysDefines.add("_WINDOWS"); sysDefines.add("_WINDOWS");
sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\""); sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\""); sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
sysDefines.add("INCLUDE_TRACE"); sysDefines.add("INCLUDE_TRACE=1");
sysDefines.add("_JNI_IMPLEMENTATION_"); sysDefines.add("_JNI_IMPLEMENTATION_");
if (vars.get("PlatformName").equals("Win32")) { if (vars.get("PlatformName").equals("Win32")) {
sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\""); sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
View File
@ -39,6 +39,7 @@
#include "memory/gcLocker.hpp" #include "memory/gcLocker.hpp"
#include "memory/metadataFactory.hpp" #include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp" #include "memory/oopFactory.hpp"
#include "memory/referenceType.hpp"
#include "memory/universe.inline.hpp" #include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp" #include "oops/constantPool.hpp"
#include "oops/fieldStreams.hpp" #include "oops/fieldStreams.hpp"
View File
@ -64,6 +64,11 @@
#include "utilities/growableArray.hpp" #include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp" #include "utilities/ostream.hpp"
#if INCLUDE_TRACE
#include "trace/tracing.hpp"
#endif
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@ -120,6 +125,12 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
} }
} }
void ClassLoaderData::classes_do(void f(Klass * const)) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
f(k);
}
}
void ClassLoaderData::classes_do(void f(InstanceKlass*)) { void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
if (k->oop_is_instance()) { if (k->oop_is_instance()) {
@ -583,6 +594,19 @@ void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
} }
} }
void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->classes_do(f);
}
}
void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
cld->classes_do(f);
}
}
GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() { GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?"); assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
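These function-pointer overloads complement the existing KlassClosure-based walkers; classes_unloading_do() additionally asserts a safepoint because it touches the _unloading list. A hypothetical caller (count_klass and g_klass_count are illustrative names, not part of the patch):

  // Hypothetical walker counting every Klass in live class loader data.
  static int g_klass_count = 0;
  static void count_klass(Klass* const k) { g_klass_count++; }

  ClassLoaderDataGraph::classes_do(&count_klass);            // live loaders
  // Only at a safepoint, the not-yet-purged dead loaders:
  ClassLoaderDataGraph::classes_unloading_do(&count_klass);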
@ -687,6 +711,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
dead->set_next(_unloading); dead->set_next(_unloading);
_unloading = dead; _unloading = dead;
} }
if (seen_dead_loader) {
post_class_unload_events();
}
return seen_dead_loader; return seen_dead_loader;
} }
@ -702,6 +731,20 @@ void ClassLoaderDataGraph::purge() {
Metaspace::purge(); Metaspace::purge();
} }
void ClassLoaderDataGraph::post_class_unload_events(void) {
#if INCLUDE_TRACE
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
if (Tracing::enabled()) {
if (Tracing::is_event_enabled(TraceClassUnloadEvent)) {
assert(_unloading != NULL, "need class loader data unload list!");
_class_unload_time = Tracing::time();
classes_unloading_do(&class_unload_event);
}
Tracing::on_unloading_classes();
}
#endif
}
// CDS support // CDS support
// Global metaspaces for writing information to the shared archive. When // Global metaspaces for writing information to the shared archive. When
@ -769,3 +812,21 @@ void ClassLoaderData::print_value_on(outputStream* out) const {
class_loader()->print_value_on(out); class_loader()->print_value_on(out);
} }
} }
#if INCLUDE_TRACE
TracingTime ClassLoaderDataGraph::_class_unload_time;
void ClassLoaderDataGraph::class_unload_event(Klass* const k) {
// post class unload event
EventClassUnload event(UNTIMED);
event.set_endtime(_class_unload_time);
event.set_unloadedClass(k);
oop defining_class_loader = k->class_loader();
event.set_definingClassLoader(defining_class_loader != NULL ?
defining_class_loader->klass() : (Klass*)NULL);
event.commit();
}
#endif /* INCLUDE_TRACE */
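Worth noting how class_unload_event() timestamps: post_class_unload_events() samples Tracing::time() once into _class_unload_time, and every event is created UNTIMED and stamped with that shared end time, so all unload events from one safepoint carry an identical timestamp. The pattern, reduced:

  // Sketch: one clock read per unloading pass, reused for every event.
  _class_unload_time = Tracing::time();   // in post_class_unload_events()
  // ... then, for each unloading Klass k:
  EventClassUnload event(UNTIMED);        // suppress per-event timing
  event.set_endtime(_class_unload_time);  // shared timestamp
  event.set_unloadedClass(k);
  event.commit();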
View File
@ -32,6 +32,10 @@
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp" #include "utilities/growableArray.hpp"
#if INCLUDE_TRACE
# include "trace/traceTime.hpp"
#endif
// //
// A class loader represents a linkset. Conceptually, a linkset identifies // A class loader represents a linkset. Conceptually, a linkset identifies
// the complete transitive closure of resolved links that a dynamic linker can // the complete transitive closure of resolved links that a dynamic linker can
@ -49,6 +53,7 @@ class ClassLoaderData;
class JNIMethodBlock; class JNIMethodBlock;
class JNIHandleBlock; class JNIHandleBlock;
class Metadebug; class Metadebug;
// GC root for walking class loader data created // GC root for walking class loader data created
class ClassLoaderDataGraph : public AllStatic { class ClassLoaderDataGraph : public AllStatic {
@ -63,6 +68,7 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* _saved_head; static ClassLoaderData* _saved_head;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
public: public:
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge(); static void purge();
@ -71,6 +77,8 @@ class ClassLoaderDataGraph : public AllStatic {
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void classes_do(KlassClosure* klass_closure); static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive); static bool do_unloading(BoolObjectClosure* is_alive);
// CMS support. // CMS support.
@ -86,6 +94,12 @@ class ClassLoaderDataGraph : public AllStatic {
static bool contains(address x); static bool contains(address x);
static bool contains_loader_data(ClassLoaderData* loader_data); static bool contains_loader_data(ClassLoaderData* loader_data);
#endif #endif
#if INCLUDE_TRACE
private:
static TracingTime _class_unload_time;
static void class_unload_event(Klass* const k);
#endif
}; };
// ClassLoaderData class // ClassLoaderData class
@ -171,7 +185,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void unload(); void unload();
bool keep_alive() const { return _keep_alive; } bool keep_alive() const { return _keep_alive; }
bool is_alive(BoolObjectClosure* is_alive_closure) const; bool is_alive(BoolObjectClosure* is_alive_closure) const;
void classes_do(void f(Klass*));
void classes_do(void f(InstanceKlass*)); void classes_do(void f(InstanceKlass*));
// Deallocate free list during class unloading. // Deallocate free list during class unloading.

View File
// Read thread status value from threadStatus field in java.lang.Thread java class. // Read thread status value from threadStatus field in java.lang.Thread java class.
java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) { java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
assert(Thread::current()->is_VM_thread() || assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() ||
JavaThread::current()->thread_state() == _thread_in_vm, JavaThread::current()->thread_state() == _thread_in_vm,
"Java Thread is not running in vm"); "Java Thread is not running in vm");
// The threadStatus is only present starting in 1.5 // The threadStatus is only present starting in 1.5
View File
@ -56,6 +56,11 @@
#include "services/classLoadingService.hpp" #include "services/classLoadingService.hpp"
#include "services/threadService.hpp" #include "services/threadService.hpp"
#if INCLUDE_TRACE
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#endif
Dictionary* SystemDictionary::_dictionary = NULL; Dictionary* SystemDictionary::_dictionary = NULL;
PlaceholderTable* SystemDictionary::_placeholders = NULL; PlaceholderTable* SystemDictionary::_placeholders = NULL;
@ -586,10 +591,15 @@ instanceKlassHandle SystemDictionary::handle_parallel_super_load(
} }
Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, Handle protection_domain, TRAPS) { Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
Handle class_loader,
Handle protection_domain,
TRAPS) {
assert(name != NULL && !FieldType::is_array(name) && assert(name != NULL && !FieldType::is_array(name) &&
!FieldType::is_obj(name), "invalid class name"); !FieldType::is_obj(name), "invalid class name");
TracingTime class_load_start_time = Tracing::time();
// UseNewReflection // UseNewReflection
// Fix for 4474172; see evaluation for more details // Fix for 4474172; see evaluation for more details
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
@ -804,8 +814,9 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// during compilations. // during compilations.
MutexLocker mu(Compile_lock, THREAD); MutexLocker mu(Compile_lock, THREAD);
update_dictionary(d_index, d_hash, p_index, p_hash, update_dictionary(d_index, d_hash, p_index, p_hash,
k, class_loader, THREAD); k, class_loader, THREAD);
} }
if (JvmtiExport::should_post_class_load()) { if (JvmtiExport::should_post_class_load()) {
Thread *thread = THREAD; Thread *thread = THREAD;
assert(thread->is_Java_thread(), "thread->is_Java_thread()"); assert(thread->is_Java_thread(), "thread->is_Java_thread()");
@ -861,8 +872,8 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// This brackets the SystemDictionary updates for both defining // This brackets the SystemDictionary updates for both defining
// and initiating loaders // and initiating loaders
MutexLocker mu(SystemDictionary_lock, THREAD); MutexLocker mu(SystemDictionary_lock, THREAD);
placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD); placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
SystemDictionary_lock->notify_all(); SystemDictionary_lock->notify_all();
} }
} }
@ -870,6 +881,8 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
return NULL; return NULL;
} }
post_class_load_event(class_load_start_time, k, class_loader);
#ifdef ASSERT #ifdef ASSERT
{ {
ClassLoaderData* loader_data = k->class_loader_data(); ClassLoaderData* loader_data = k->class_loader_data();
@ -993,6 +1006,8 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
TRAPS) { TRAPS) {
TempNewSymbol parsed_name = NULL; TempNewSymbol parsed_name = NULL;
TracingTime class_load_start_time = Tracing::time();
ClassLoaderData* loader_data; ClassLoaderData* loader_data;
if (host_klass.not_null()) { if (host_klass.not_null()) {
// Create a new CLD for anonymous class, that uses the same class loader // Create a new CLD for anonymous class, that uses the same class loader
@ -1048,6 +1063,8 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
JvmtiExport::post_class_load((JavaThread *) THREAD, k()); JvmtiExport::post_class_load((JavaThread *) THREAD, k());
} }
post_class_load_event(class_load_start_time, k, class_loader);
} }
assert(host_klass.not_null() || cp_patches == NULL, assert(host_klass.not_null() || cp_patches == NULL,
"cp_patches only found with host_klass"); "cp_patches only found with host_klass");
@ -1435,6 +1452,7 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
JvmtiExport::post_class_load((JavaThread *) THREAD, k()); JvmtiExport::post_class_load((JavaThread *) THREAD, k());
} }
} }
// Support parallel classloading // Support parallel classloading
@ -1678,6 +1696,7 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
} }
return newsize; return newsize;
} }
// Assumes classes in the SystemDictionary are only unloaded at a safepoint // Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD. // Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@ -2024,12 +2043,6 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
} }
} }
// Assign a classid if one has not already been assigned. The
// counter does not need to be atomically incremented since this
// is only done while holding the SystemDictionary_lock.
// All loaded classes get a unique ID.
TRACE_INIT_ID(k);
// Make a new system dictionary entry. // Make a new system dictionary entry.
Klass* sd_check = find_class(d_index, d_hash, name, loader_data); Klass* sd_check = find_class(d_index, d_hash, name, loader_data);
if (sd_check == NULL) { if (sd_check == NULL) {
@ -2612,6 +2625,27 @@ void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
"Loaded klasses should be in SystemDictionary"); "Loaded klasses should be in SystemDictionary");
} }
// utility function for class load event
void SystemDictionary::post_class_load_event(TracingTime start_time,
instanceKlassHandle k,
Handle initiating_loader) {
#if INCLUDE_TRACE
EventClassLoad event(UNTIMED);
if (event.should_commit()) {
event.set_endtime(Tracing::time());
event.set_starttime(start_time);
event.set_loadedClass(k());
oop defining_class_loader = k->class_loader();
event.set_definingClassLoader(defining_class_loader != NULL ?
defining_class_loader->klass() : (Klass*)NULL);
oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
event.set_initiatingClassLoader(class_loader != NULL ?
class_loader->klass() : (Klass*)NULL);
event.commit();
}
#endif /* INCLUDE_TRACE */
}
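post_class_load_event() pairs with the Tracing::time() samples added near the top of resolve_instance_class_or_null() and parse_stream(): the start time is captured before any loading work, and the event is committed only after the class is fully defined, so the event duration spans the whole load. Stripped to the bracket:

  TracingTime class_load_start_time = Tracing::time();  // on entry
  // ... resolve/define the class, yielding instanceKlassHandle k ...
  EventClassLoad event(UNTIMED);
  if (event.should_commit()) {          // skip field setup when disabled
    event.set_starttime(class_load_start_time);
    event.set_endtime(Tracing::time());
    event.set_loadedClass(k());
    event.commit();
  }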
#ifndef PRODUCT #ifndef PRODUCT
// statistics code // statistics code
View File
@ -31,9 +31,11 @@
#include "oops/symbol.hpp" #include "oops/symbol.hpp"
#include "runtime/java.hpp" #include "runtime/java.hpp"
#include "runtime/reflectionUtils.hpp" #include "runtime/reflectionUtils.hpp"
#include "trace/traceTime.hpp"
#include "utilities/hashtable.hpp" #include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp" #include "utilities/hashtable.inline.hpp"
// The system dictionary stores all loaded classes and maps: // The system dictionary stores all loaded classes and maps:
// //
// [class name,class loader] -> class i.e. [Symbol*,oop] -> Klass* // [class name,class loader] -> class i.e. [Symbol*,oop] -> Klass*
@ -636,6 +638,9 @@ private:
// Setup link to hierarchy // Setup link to hierarchy
static void add_to_hierarchy(instanceKlassHandle k, TRAPS); static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
// event based tracing
static void post_class_load_event(TracingTime start_time, instanceKlassHandle k,
Handle initiating_loader);
// We pass in the hashtable index so we can calculate it outside of // We pass in the hashtable index so we can calculate it outside of
// the SystemDictionary_lock. // the SystemDictionary_lock.
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -45,6 +45,7 @@
#include "runtime/java.hpp" #include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp" #include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp" #include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp" #include "utilities/xmlstream.hpp"
// Helper class for printing in CodeCache // Helper class for printing in CodeCache
@ -114,7 +115,6 @@ class CodeBlob_sizes {
} }
}; };
// CodeCache implementation // CodeCache implementation
CodeHeap * CodeCache::_heap = new CodeHeap(); CodeHeap * CodeCache::_heap = new CodeHeap();
@ -126,6 +126,7 @@ bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL; nmethod* CodeCache::_saved_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
CodeBlob* CodeCache::first() { CodeBlob* CodeCache::first() {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
@ -829,6 +830,22 @@ void CodeCache::verify() {
} }
} }
void CodeCache::report_codemem_full() {
_codemem_full_count++;
EventCodeCacheFull event;
if (event.should_commit()) {
event.set_startAddress((u8)low_bound());
event.set_commitedTopAddress((u8)high());
event.set_reservedTopAddress((u8)high_bound());
event.set_entryCount(nof_blobs());
event.set_methodCount(nof_nmethods());
event.set_adaptorCount(nof_adapters());
event.set_unallocatedCapacity(unallocated_capacity()/K);
event.set_fullCount(_codemem_full_count);
event.commit();
}
}
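A short note on units and call sites: unallocatedCapacity is reported in kilobytes (hence the /K), and _codemem_full_count is cumulative, so fullCount distinguishes a first exhaustion from a recurring one. The call site added in this changeset appears further down in compileBroker.cpp:

  // Sketch of the caller: warn once, then record the exhaustion for tracing.
  warning("CodeCache is full. Compiler has been disabled.");
  CodeCache::report_codemem_full();   // bump counter, emit EventCodeCacheFull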
//------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------
// Non-product version // Non-product version
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -64,11 +64,15 @@ class CodeCache : AllStatic {
static void mark_scavenge_root_nmethods() PRODUCT_RETURN; static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
static int _codemem_full_count;
public: public:
// Initialization // Initialization
static void initialize(); static void initialize();
static void report_codemem_full();
// Allocation/administration // Allocation/administration
static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
@ -155,6 +159,7 @@ class CodeCache : AllStatic {
// The full limits of the codeCache // The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); } static address low_bound() { return (address) _heap->low_boundary(); }
static address high_bound() { return (address) _heap->high_boundary(); } static address high_bound() { return (address) _heap->high_boundary(); }
static address high() { return (address) _heap->high(); }
// Profiling // Profiling
static address first_address(); // first address used for CodeBlobs static address first_address(); // first address used for CodeBlobs
@ -186,6 +191,8 @@ class CodeCache : AllStatic {
// tells how many nmethods have dependencies // tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies(); static int number_of_nmethods_with_dependencies();
static int get_codemem_full_count() { return _codemem_full_count; }
}; };
#endif // SHARE_VM_CODE_CODECACHE_HPP #endif // SHARE_VM_CODE_CODECACHE_HPP
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,7 @@
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp" #include "runtime/sweeper.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp" #include "utilities/dtrace.hpp"
#include "utilities/events.hpp" #include "utilities/events.hpp"
#ifdef COMPILER1 #ifdef COMPILER1
@ -179,9 +180,11 @@ int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0; int CompileBroker::_sum_nmethod_code_size = 0;
CompileQueue* CompileBroker::_c2_method_queue = NULL; long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c1_method_queue = NULL;
CompileTask* CompileBroker::_task_free_list = NULL; CompileQueue* CompileBroker::_c2_method_queue = NULL;
CompileQueue* CompileBroker::_c1_method_queue = NULL;
CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL; GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
@ -1795,6 +1798,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
ciMethod* target = ci_env.get_method_from_handle(target_handle); ciMethod* target = ci_env.get_method_from_handle(target_handle);
TraceTime t1("compilation", &time); TraceTime t1("compilation", &time);
EventCompilation event;
AbstractCompiler *comp = compiler(task_level); AbstractCompiler *comp = compiler(task_level);
if (comp == NULL) { if (comp == NULL) {
@ -1836,6 +1840,16 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
} }
// simulate crash during compilation // simulate crash during compilation
assert(task->compile_id() != CICrashAt, "just as planned"); assert(task->compile_id() != CICrashAt, "just as planned");
if (event.should_commit()) {
event.set_method(target->get_Method());
event.set_compileID(compile_id);
event.set_compileLevel(task->comp_level());
event.set_succeded(task->is_success());
event.set_isOsr(is_osr);
event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
event.set_inlinedBytes(task->num_inlined_bytecodes());
event.commit();
}
} }
pop_jni_handle_block(); pop_jni_handle_block();
@ -1916,6 +1930,10 @@ void CompileBroker::handle_full_code_cache() {
} }
warning("CodeCache is full. Compiler has been disabled."); warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
CodeCache::report_codemem_full();
#ifndef PRODUCT #ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) { if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true); codecache_print(/* detailed= */ true);
@ -2073,8 +2091,10 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
// java.lang.management.CompilationMBean // java.lang.management.CompilationMBean
_perf_total_compilation->inc(time.ticks()); _perf_total_compilation->inc(time.ticks());
_t_total_compilation.add(time);
_peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time;
if (CITime) { if (CITime) {
_t_total_compilation.add(time);
if (is_osr) { if (is_osr) {
_t_osr_compilation.add(time); _t_osr_compilation.add(time);
_sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
@ -2172,7 +2192,6 @@ void CompileBroker::print_times() {
tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size); tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size);
} }
// Debugging output for failure // Debugging output for failure
void CompileBroker::print_last_compile() { void CompileBroker::print_last_compile() {
if ( _last_compile_level != CompLevel_none && if ( _last_compile_level != CompLevel_none &&
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -299,17 +299,17 @@ class CompileBroker: AllStatic {
static elapsedTimer _t_osr_compilation; static elapsedTimer _t_osr_compilation;
static elapsedTimer _t_standard_compilation; static elapsedTimer _t_standard_compilation;
static int _total_compile_count;
static int _total_bailout_count; static int _total_bailout_count;
static int _total_invalidated_count; static int _total_invalidated_count;
static int _total_compile_count;
static int _total_native_compile_count; static int _total_native_compile_count;
static int _total_osr_compile_count; static int _total_osr_compile_count;
static int _total_standard_compile_count; static int _total_standard_compile_count;
static int _sum_osr_bytes_compiled; static int _sum_osr_bytes_compiled;
static int _sum_standard_bytes_compiled; static int _sum_standard_bytes_compiled;
static int _sum_nmethod_size; static int _sum_nmethod_size;
static int _sum_nmethod_code_size; static int _sum_nmethod_code_size;
static long _peak_compilation_time;
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
@ -421,6 +421,19 @@ class CompileBroker: AllStatic {
// compiler name for debugging // compiler name for debugging
static const char* compiler_name(int comp_level); static const char* compiler_name(int comp_level);
static int get_total_compile_count() { return _total_compile_count; }
static int get_total_bailout_count() { return _total_bailout_count; }
static int get_total_invalidated_count() { return _total_invalidated_count; }
static int get_total_native_compile_count() { return _total_native_compile_count; }
static int get_total_osr_compile_count() { return _total_osr_compile_count; }
static int get_total_standard_compile_count() { return _total_standard_compile_count; }
static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; }
static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; }
static int get_sum_nmethod_size() { return _sum_nmethod_size;}
static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
static long get_peak_compilation_time() { return _peak_compilation_time; }
static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
}; };
#endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
View File
@ -37,8 +37,12 @@
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp" #include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp" #include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp" #include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp" #include "memory/gcLocker.inline.hpp"
@ -60,7 +64,8 @@
// statics // statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false; bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization // In support of CMS/VM thread synchronization
@ -591,7 +596,10 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_concurrent_cycles_since_last_unload(0), _concurrent_cycles_since_last_unload(0),
_roots_scanning_options(0), _roots_scanning_options(0),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_cms_start_registered(false)
{ {
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true; ExplicitGCInvokesConcurrent = true;
@ -1676,18 +1684,38 @@ void CMSCollector::collect(bool full,
_full_gcs_since_conc_gc++; _full_gcs_since_conc_gc++;
} }
void CMSCollector::request_full_gc(unsigned int full_gc_count) { void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
unsigned int gc_count = gch->total_full_collections(); unsigned int gc_count = gch->total_full_collections();
if (gc_count == full_gc_count) { if (gc_count == full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true; _full_gc_requested = true;
_full_gc_cause = cause;
CGC_lock->notify(); // nudge CMS thread CGC_lock->notify(); // nudge CMS thread
} else { } else {
assert(gc_count > full_gc_count, "Error: causal loop"); assert(gc_count > full_gc_count, "Error: causal loop");
} }
} }
bool CMSCollector::is_external_interruption() {
GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
return GCCause::is_user_requested_gc(cause) ||
GCCause::is_serviceability_requested_gc(cause);
}
void CMSCollector::report_concurrent_mode_interruption() {
if (is_external_interruption()) {
if (PrintGCDetails) {
gclog_or_tty->print(" (concurrent mode interrupted)");
}
} else {
if (PrintGCDetails) {
gclog_or_tty->print(" (concurrent mode failure)");
}
_gc_tracer_cm->report_concurrent_mode_failure();
}
}
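is_external_interruption() draws the line the old inline PrintGCDetails code drew implicitly: a background cycle pre-empted by an explicitly requested full GC (user-requested, e.g. System.gc(), or serviceability-requested) counts only as an interruption, while any other pre-emption is a genuine concurrent mode failure and is now also reported to the tracer rather than merely printed:

  // Sketch: only genuine failures reach the tracer.
  if (!is_external_interruption()) {
    _gc_tracer_cm->report_concurrent_mode_failure();  // tracing signal, plus log
  }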
// The foreground and background collectors need to coordinate in order // The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections. // to make sure that they do not mutually interfere with CMS collections.
@ -1845,14 +1873,8 @@ NOT_PRODUCT(
} }
) )
if (PrintGCDetails && first_state > Idling) { if (first_state > Idling) {
GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); report_concurrent_mode_interruption();
if (GCCause::is_user_requested_gc(cause) ||
GCCause::is_serviceability_requested_gc(cause)) {
gclog_or_tty->print(" (concurrent mode interrupted)");
} else {
gclog_or_tty->print(" (concurrent mode failure)");
}
} }
set_did_compact(should_compact); set_did_compact(should_compact);
@ -1868,6 +1890,10 @@ NOT_PRODUCT(
// Reference objects are active. // Reference objects are active.
ref_processor()->clean_up_discovered_references(); ref_processor()->clean_up_discovered_references();
if (first_state > Idling) {
save_heap_summary();
}
do_compaction_work(clear_all_soft_refs); do_compaction_work(clear_all_soft_refs);
// Has the GC time limit been exceeded? // Has the GC time limit been exceeded?
@ -1971,7 +1997,14 @@ void CMSCollector::decide_foreground_collection_type(
// a mark-sweep-compact. // a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
gc_timer->register_gc_start(os::elapsed_counter());
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
"collections passed to foreground collector", _full_gcs_since_conc_gc); "collections passed to foreground collector", _full_gcs_since_conc_gc);
@ -2062,6 +2095,10 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
size_policy()->msc_collection_end(gch->gc_cause()); size_policy()->msc_collection_end(gch->gc_cause());
} }
gc_timer->register_gc_end(os::elapsed_counter());
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
// For a mark-sweep-compact, compute_new_size() will be called // For a mark-sweep-compact, compute_new_size() will be called
// in the heap's do_collection() method. // in the heap's do_collection() method.
} }
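do_compaction_work() now brackets the foreground compaction with the serial old collector's STW timer and tracer; GCTraceTime is handed NULL for the timer here because registration is done explicitly around the whole operation. The bracket, reduced to its steps:

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start(os::elapsed_counter());        // wall-clock start
  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  // ... mark-sweep-compact the heap ...
  gc_timer->register_gc_end(os::elapsed_counter());
  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());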
@ -2093,7 +2130,7 @@ void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
// required. // required.
_collectorState = FinalMarking; _collectorState = FinalMarking;
} }
collect_in_foreground(clear_all_soft_refs); collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
// For a mark-sweep, compute_new_size() will be called // For a mark-sweep, compute_new_size() will be called
// in the heap's do_collection() method. // in the heap's do_collection() method.
@ -2153,7 +2190,7 @@ class ReleaseForegroundGC: public StackObj {
// one "collect" method between the background collector and the foreground // one "collect" method between the background collector and the foreground
// collector but the if-then-else required made it cleaner to have // collector but the if-then-else required made it cleaner to have
// separate methods. // separate methods.
void CMSCollector::collect_in_background(bool clear_all_soft_refs) { void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
assert(Thread::current()->is_ConcurrentGC_thread(), assert(Thread::current()->is_ConcurrentGC_thread(),
"A CMS asynchronous collection is only allowed on a CMS thread."); "A CMS asynchronous collection is only allowed on a CMS thread.");
@ -2172,6 +2209,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
} else { } else {
assert(_collectorState == Idling, "Should be idling before start."); assert(_collectorState == Idling, "Should be idling before start.");
_collectorState = InitialMarking; _collectorState = InitialMarking;
register_gc_start(cause);
// Reset the expansion cause, now that we are about to begin // Reset the expansion cause, now that we are about to begin
// a new cycle. // a new cycle.
clear_expansion_cause(); clear_expansion_cause();
@ -2184,6 +2222,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
// ensuing concurrent GC cycle. // ensuing concurrent GC cycle.
update_should_unload_classes(); update_should_unload_classes();
_full_gc_requested = false; // acks all outstanding full gc requests _full_gc_requested = false; // acks all outstanding full gc requests
_full_gc_cause = GCCause::_no_gc;
// Signal that we are about to start a collection // Signal that we are about to start a collection
gch->increment_total_full_collections(); // ... starting a collection cycle gch->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = gch->total_full_collections(); _collection_count_start = gch->total_full_collections();
@ -2263,7 +2302,6 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
{ {
ReleaseForegroundGC x(this); ReleaseForegroundGC x(this);
stats().record_cms_begin(); stats().record_cms_begin();
VM_CMS_Initial_Mark initial_mark_op(this); VM_CMS_Initial_Mark initial_mark_op(this);
VMThread::execute(&initial_mark_op); VMThread::execute(&initial_mark_op);
} }
@ -2343,6 +2381,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
CMSTokenSync z(true); // not strictly needed. CMSTokenSync z(true); // not strictly needed.
if (_collectorState == Resizing) { if (_collectorState == Resizing) {
compute_new_size(); compute_new_size();
save_heap_summary();
_collectorState = Resetting; _collectorState = Resetting;
} else { } else {
assert(_collectorState == Idling, "The state should only change" assert(_collectorState == Idling, "The state should only change"
@ -2401,7 +2440,39 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
} }
} }
void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
if (!_cms_start_registered) {
register_gc_start(cause);
}
}
void CMSCollector::register_gc_start(GCCause::Cause cause) {
_cms_start_registered = true;
_gc_timer_cm->register_gc_start(os::elapsed_counter());
_gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
}
void CMSCollector::register_gc_end() {
if (_cms_start_registered) {
report_heap_summary(GCWhen::AfterGC);
_gc_timer_cm->register_gc_end(os::elapsed_counter());
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
_cms_start_registered = false;
}
}
void CMSCollector::save_heap_summary() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
_last_heap_summary = gch->create_heap_summary();
_last_metaspace_summary = gch->create_metaspace_summary();
}
void CMSCollector::report_heap_summary(GCWhen::Type when) {
_gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
}
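These helpers make cycle registration idempotent: _cms_start_registered lets a foreground collection adopt a cycle the background thread already opened without starting a second trace, and register_gc_end() is a no-op unless a start was actually recorded. In outline:

  // Sketch of the intended calling pattern around a foreground collection.
  register_foreground_gc_start(cause); // no-op if background already registered
  // ... finish the collection ...
  register_gc_end();                   // reports heap summary once, clears flag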
void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
assert(_foregroundGCIsActive && !_foregroundGCShouldWait, assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
"Foreground collector should be waiting, not executing"); "Foreground collector should be waiting, not executing");
assert(Thread::current()->is_VM_thread(), "A foreground collection" assert(Thread::current()->is_VM_thread(), "A foreground collection"
@ -2409,8 +2480,8 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
"VM thread should have CMS token"); "VM thread should have CMS token");
NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
true, gclog_or_tty);) true, NULL);)
if (UseAdaptiveSizePolicy) { if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_begin(); size_policy()->ms_collection_begin();
} }
@ -2434,6 +2505,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
} }
switch (_collectorState) { switch (_collectorState) {
case InitialMarking: case InitialMarking:
register_foreground_gc_start(cause);
init_mark_was_synchronous = true; // fact to be exploited in re-mark init_mark_was_synchronous = true; // fact to be exploited in re-mark
checkpointRootsInitial(false); checkpointRootsInitial(false);
assert(_collectorState == Marking, "Collector state should have changed" assert(_collectorState == Marking, "Collector state should have changed"
@ -2482,6 +2554,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
Universe::verify("Verify before reset: "); Universe::verify("Verify before reset: ");
} }
save_heap_summary();
reset(false); reset(false);
assert(_collectorState == Idling, "Collector state should " assert(_collectorState == Idling, "Collector state should "
"have changed"); "have changed");
@ -3504,6 +3577,9 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
check_correct_thread_executing(); check_correct_thread_executing();
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
save_heap_summary();
report_heap_summary(GCWhen::BeforeGC);
ReferenceProcessor* rp = ref_processor(); ReferenceProcessor* rp = ref_processor();
SpecializationStats::clear(); SpecializationStats::clear();
assert(_restart_addr == NULL, "Control point invariant"); assert(_restart_addr == NULL, "Control point invariant");
@ -3549,8 +3625,8 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// CMS collection cycle. // CMS collection cycle.
setup_cms_unloading_and_verification_state(); setup_cms_unloading_and_verification_state();
NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
PrintGCDetails && Verbose, true, gclog_or_tty);) PrintGCDetails && Verbose, true, _gc_timer_cm);)
if (UseAdaptiveSizePolicy) { if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_begin(); size_policy()->checkpoint_roots_initial_begin();
} }
@ -4542,8 +4618,10 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
// The code in this method may need further // The code in this method may need further
// tweaking for better performance and some restructuring // tweaking for better performance and some restructuring
// for cleaner interfaces. // for cleaner interfaces.
GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
rp->preclean_discovered_references( rp->preclean_discovered_references(
rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl); rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
gc_timer);
} }
if (clean_survivor) { // preclean the active survivor space(s) if (clean_survivor) { // preclean the active survivor space(s)
@ -4885,8 +4963,8 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
// Temporarily set flag to false, GCH->do_collection will // Temporarily set flag to false, GCH->do_collection will
// expect it to be false and set to true // expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false); FlagSetting fl(gch->_is_gc_active, false);
NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
PrintGCDetails && Verbose, true, gclog_or_tty);) PrintGCDetails && Verbose, true, _gc_timer_cm);)
int level = _cmsGen->level() - 1; int level = _cmsGen->level() - 1;
if (level >= 0) { if (level >= 0) {
gch->do_collection(true, // full (i.e. force, see below) gch->do_collection(true, // full (i.e. force, see below)
@ -4915,7 +4993,7 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
void CMSCollector::checkpointRootsFinalWork(bool asynch, void CMSCollector::checkpointRootsFinalWork(bool asynch,
bool clear_all_soft_refs, bool init_mark_was_synchronous) { bool clear_all_soft_refs, bool init_mark_was_synchronous) {
NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
assert(haveFreelistLocks(), "must have free list locks"); assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock()); assert_lock_strong(bitMapLock());
@ -4966,11 +5044,11 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
// the most recent young generation GC, minus those cleaned up by the // the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning. // concurrent precleaning.
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
do_remark_parallel(); do_remark_parallel();
} else { } else {
TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
gclog_or_tty); _gc_timer_cm);
do_remark_non_parallel(); do_remark_non_parallel();
} }
} }
@ -4983,7 +5061,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
verify_overflow_empty(); verify_overflow_empty();
{ {
NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
refProcessingWork(asynch, clear_all_soft_refs); refProcessingWork(asynch, clear_all_soft_refs);
} }
verify_work_stacks_empty(); verify_work_stacks_empty();
@ -5044,6 +5122,8 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
verify_after_remark(); verify_after_remark();
} }
_gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
// Change under the freelistLocks. // Change under the freelistLocks.
_collectorState = Sweeping; _collectorState = Sweeping;
// Call isAllClear() under bitMapLock // Call isAllClear() under bitMapLock
@ -5697,7 +5777,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl); &_markBitMap, &_markStack, &mrias_cl);
{ {
TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
// Iterate over the dirty cards, setting the corresponding bits in the // Iterate over the dirty cards, setting the corresponding bits in the
// mod union table. // mod union table.
{ {
@ -5734,7 +5814,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify(); Universe::verify();
} }
{ {
TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty(); verify_work_stacks_empty();
@ -5756,7 +5836,7 @@ void CMSCollector::do_remark_non_parallel() {
} }
{ {
TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty(); verify_work_stacks_empty();
@ -5775,7 +5855,7 @@ void CMSCollector::do_remark_non_parallel() {
} }
{ {
TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty(); verify_work_stacks_empty();
@ -5977,7 +6057,9 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
_span, &_markBitMap, &_markStack, _span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */); &cmsKeepAliveClosure, false /* !preclean */);
{ {
TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) { if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there // Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery // may have been a different number of threads doing the discovery
@ -5996,16 +6078,20 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
} }
rp->set_active_mt_degree(active_workers); rp->set_active_mt_degree(active_workers);
CMSRefProcTaskExecutor task_executor(*this); CMSRefProcTaskExecutor task_executor(*this);
rp->process_discovered_references(&_is_alive_closure, stats = rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure, &cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure, &cmsDrainMarkingStackClosure,
&task_executor); &task_executor,
_gc_timer_cm);
} else { } else {
rp->process_discovered_references(&_is_alive_closure, stats = rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure, &cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure, &cmsDrainMarkingStackClosure,
NULL); NULL,
_gc_timer_cm);
} }
_gc_tracer_cm->report_gc_reference_stats(stats);
} }
// This is the point where the entire marking should have completed. // This is the point where the entire marking should have completed.
@ -6013,7 +6099,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
if (should_unload_classes()) { if (should_unload_classes()) {
{ {
TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
// Unload classes and purge the SystemDictionary. // Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@ -6026,7 +6112,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
} }
{ {
TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
// Clean up unreferenced symbols in symbol table. // Clean up unreferenced symbols in symbol table.
SymbolTable::unlink(); SymbolTable::unlink();
} }
@ -6035,7 +6121,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// CMS doesn't use the StringTable as hard roots when class unloading is turned off. // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
// Need to check if we really scanned the StringTable. // Need to check if we really scanned the StringTable.
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
// Delete entries for dead interned strings. // Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure); StringTable::unlink(&_is_alive_closure);
} }
@ -6380,12 +6466,14 @@ void CMSCollector::reset(bool asynch) {
_cmsGen->rotate_debug_collection_type(); _cmsGen->rotate_debug_collection_type();
} }
) )
register_gc_end();
} }
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty); GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters()); TraceCollectorStats tcs(counters());
switch (op) { switch (op) {
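The recurring edit in this file swaps the logging-only TraceTime scope for a GCTraceTime bound to _gc_timer_cm, so each remark sub-phase is recorded for the GC tracer as well as printed. Below is a minimal standalone sketch of that RAII pattern; PhaseTimer and ScopedPhase are illustrative stand-ins, not HotSpot's ConcurrentGCTimer/GCTraceTime classes.

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct PhaseRecord { std::string name; double ms; };

class PhaseTimer {                          // stand-in for ConcurrentGCTimer
 public:
  void record(const std::string& name, double ms) { _phases.push_back({name, ms}); }
  std::size_t phase_count() const { return _phases.size(); }
 private:
  std::vector<PhaseRecord> _phases;
};

class ScopedPhase {                         // stand-in for GCTraceTime
 public:
  ScopedPhase(const char* name, bool print, PhaseTimer* timer)
      : _name(name), _print(print), _timer(timer),
        _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhase() {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - _start).count();
    if (_print) std::printf("[%s, %.3f ms]\n", _name, ms);
    if (_timer) _timer->record(_name, ms);  // phase kept for later tracing
  }
 private:
  const char* _name;
  bool _print;
  PhaseTimer* _timer;
  std::chrono::steady_clock::time_point _start;
};

int main() {
  PhaseTimer timer;
  {
    ScopedPhase t("Rescan (non-parallel) ", true, &timer);
    // ... remark work would run here ...
  }
  std::printf("recorded %zu phase(s)\n", timer.phase_count());
}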
View File
@ -25,8 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp" #include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp" #include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp" #include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp" #include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp" #include "memory/generation.hpp"
@ -53,6 +55,8 @@
class CMSAdaptiveSizePolicy; class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask; class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters; class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration; class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy; class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread; class ConcurrentMarkSweepThread;
@ -61,6 +65,7 @@ class FreeChunk;
class PromotionInfo; class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure; class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration; class TenuredGeneration;
class SerialOldTracer;
// A generic CMS bit map. It's the basis for both the CMS marking bit map // A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the // as well as for the mod union table (in each case only a subset of the
@ -567,8 +572,9 @@ class CMSCollector: public CHeapObj<mtGC> {
bool _completed_initialization; bool _completed_initialization;
// In support of ExplicitGCInvokesConcurrent // In support of ExplicitGCInvokesConcurrent
static bool _full_gc_requested; static bool _full_gc_requested;
unsigned int _collection_count_start; static GCCause::Cause _full_gc_cause;
unsigned int _collection_count_start;
// Should we unload classes this concurrent cycle? // Should we unload classes this concurrent cycle?
bool _should_unload_classes; bool _should_unload_classes;
@ -609,6 +615,20 @@ class CMSCollector: public CHeapObj<mtGC> {
AdaptivePaddedAverage _inter_sweep_estimate; AdaptivePaddedAverage _inter_sweep_estimate;
AdaptivePaddedAverage _intra_sweep_estimate; AdaptivePaddedAverage _intra_sweep_estimate;
CMSTracer* _gc_tracer_cm;
ConcurrentGCTimer* _gc_timer_cm;
bool _cms_start_registered;
GCHeapSummary _last_heap_summary;
MetaspaceSummary _last_metaspace_summary;
void register_foreground_gc_start(GCCause::Cause cause);
void register_gc_start(GCCause::Cause cause);
void register_gc_end();
void save_heap_summary();
void report_heap_summary(GCWhen::Type when);
protected: protected:
ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
MemRegion _span; // span covering above two MemRegion _span; // span covering above two
@ -827,6 +847,10 @@ class CMSCollector: public CHeapObj<mtGC> {
void do_mark_sweep_work(bool clear_all_soft_refs, void do_mark_sweep_work(bool clear_all_soft_refs,
CollectorState first_state, bool should_start_over); CollectorState first_state, bool should_start_over);
// Work methods for reporting concurrent mode interruption or failure
bool is_external_interruption();
void report_concurrent_mode_interruption();
// If the background GC is active, acquire control from the background // If the background GC is active, acquire control from the background
// GC and do the collection. // GC and do the collection.
void acquire_control_and_collect(bool full, bool clear_all_soft_refs); void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
@ -876,11 +900,11 @@ class CMSCollector: public CHeapObj<mtGC> {
bool clear_all_soft_refs, bool clear_all_soft_refs,
size_t size, size_t size,
bool tlab); bool tlab);
void collect_in_background(bool clear_all_soft_refs); void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
void collect_in_foreground(bool clear_all_soft_refs); void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
// In support of ExplicitGCInvokesConcurrent // In support of ExplicitGCInvokesConcurrent
static void request_full_gc(unsigned int full_gc_count); static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
// Should we unload classes in a particular concurrent cycle? // Should we unload classes in a particular concurrent cycle?
bool should_unload_classes() const { bool should_unload_classes() const {
return _should_unload_classes; return _should_unload_classes;
View File
@ -140,7 +140,9 @@ void ConcurrentMarkSweepThread::run() {
while (!_should_terminate) { while (!_should_terminate) {
sleepBeforeNextCycle(); sleepBeforeNextCycle();
if (_should_terminate) break; if (_should_terminate) break;
_collector->collect_in_background(false); // !clear_all_soft_refs GCCause::Cause cause = _collector->_full_gc_requested ?
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
_collector->collect_in_background(false, cause);
} }
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");
// Check that the state of any protocol for synchronization // Check that the state of any protocol for synchronization
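The loop above now forwards the cause recorded by the requester into collect_in_background, falling back to a concurrent-mark cause for self-scheduled cycles. A hedged standalone sketch of that request/consume handshake follows; the Collector and Cause names are invented, and the real _full_gc_requested flag is a static on CMSCollector rather than an instance field.

#include <cstdio>
#include <mutex>

enum class Cause { ConcurrentMark, SystemGc };

struct Collector {
  std::mutex lock;
  bool full_gc_requested = false;
  Cause full_gc_cause = Cause::ConcurrentMark;

  void request_full_gc(Cause cause) {       // requester side
    std::lock_guard<std::mutex> g(lock);
    full_gc_requested = true;
    full_gc_cause = cause;                  // remember why it was asked for
  }

  Cause next_cycle_cause() {                // background-thread side
    std::lock_guard<std::mutex> g(lock);
    return full_gc_requested ? full_gc_cause : Cause::ConcurrentMark;
  }
};

int main() {
  Collector c;
  std::printf("cause=%d\n", static_cast<int>(c.next_cycle_cause()));
  c.request_full_gc(Cause::SystemGc);       // e.g. ExplicitGCInvokesConcurrent
  std::printf("cause=%d\n", static_cast<int>(c.next_cycle_cause()));
}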
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,12 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp" #include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp" #include "utilities/dtrace.hpp"
@ -60,6 +63,7 @@ void VM_CMS_Operation::release_and_notify_pending_list_lock() {
void VM_CMS_Operation::verify_before_gc() { void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC && if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
HandleMark hm; HandleMark hm;
FreelistLocker x(_collector); FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -71,6 +75,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() { void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC && if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
HandleMark hm; HandleMark hm;
FreelistLocker x(_collector); FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -140,6 +145,8 @@ void VM_CMS_Initial_Mark::doit() {
); );
#endif /* USDT2 */ #endif /* USDT2 */
_collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter());
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_initial_mark); GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
@ -149,6 +156,9 @@ void VM_CMS_Initial_Mark::doit() {
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause()); _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
VM_CMS_Operation::verify_after_gc(); VM_CMS_Operation::verify_after_gc();
_collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
#ifndef USDT2 #ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__initmark__end); HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */ #else /* USDT2 */
@ -172,6 +182,8 @@ void VM_CMS_Final_Remark::doit() {
); );
#endif /* USDT2 */ #endif /* USDT2 */
_collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter());
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_final_remark); GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
@ -181,6 +193,10 @@ void VM_CMS_Final_Remark::doit() {
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause()); _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
VM_CMS_Operation::verify_after_gc(); VM_CMS_Operation::verify_after_gc();
_collector->save_heap_summary();
_collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
#ifndef USDT2 #ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__remark__end); HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */ #else /* USDT2 */
@ -225,7 +241,7 @@ void VM_GenCollectFullConcurrent::doit() {
// In case CMS thread was in icms_wait(), wake it up. // In case CMS thread was in icms_wait(), wake it up.
CMSCollector::start_icms(); CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection. // Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before); CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
} else { } else {
assert(_full_gc_count_before < gch->total_full_collections(), "Error"); assert(_full_gc_count_before < gch->total_full_collections(), "Error");
FullGCCount_lock->notify_all(); // Inform the Java thread its work is done FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
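Both VM operations now bracket their safepoint work with explicit register_gc_pause_start/register_gc_pause_end calls on the collector's concurrent timer. A standalone sketch of that bracketing, assuming a made-up ConcurrentTimer class rather than HotSpot's real one:

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

class ConcurrentTimer {
  struct Pause { std::string name; double ms; };
  std::vector<Pause> _pauses;
  std::string _current;
  std::chrono::steady_clock::time_point _start;
 public:
  void register_gc_pause_start(const char* name) {
    _current = name;
    _start = std::chrono::steady_clock::now();
  }
  void register_gc_pause_end() {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - _start).count();
    _pauses.push_back({_current, ms});      // pause recorded for the tracer
  }
  std::size_t pause_count() const { return _pauses.size(); }
};

int main() {
  ConcurrentTimer timer;
  timer.register_gc_pause_start("Initial Mark");
  // ... safepoint work would run here ...
  timer.register_gc_pause_end();
  std::printf("pauses recorded: %zu\n", timer.pause_count());
}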
View File
@ -36,6 +36,9 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp" #include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp" #include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
@ -1342,6 +1345,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
_remark_times.add((now - start) * 1000.0); _remark_times.add((now - start) * 1000.0);
g1p->record_concurrent_mark_remark_end(); g1p->record_concurrent_mark_remark_end();
G1CMIsAliveClosure is_alive(g1h);
g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
} }
// Base class of the closures that finalize and verify the // Base class of the closures that finalize and verify the
@ -2129,6 +2135,7 @@ void ConcurrentMark::cleanup() {
} }
g1h->verify_region_sets_optional(); g1h->verify_region_sets_optional();
g1h->trace_heap_after_concurrent_cycle();
} }
void ConcurrentMark::completeCleanup() { void ConcurrentMark::completeCleanup() {
@ -2439,7 +2446,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (G1Log::finer()) { if (G1Log::finer()) {
gclog_or_tty->put(' '); gclog_or_tty->put(' ');
} }
TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty); GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
ReferenceProcessor* rp = g1h->ref_processor_cm(); ReferenceProcessor* rp = g1h->ref_processor_cm();
@ -2491,10 +2498,13 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
rp->set_active_mt_degree(active_workers); rp->set_active_mt_degree(active_workers);
// Process the weak references. // Process the weak references.
rp->process_discovered_references(&g1_is_alive, const ReferenceProcessorStats& stats =
&g1_keep_alive, rp->process_discovered_references(&g1_is_alive,
&g1_drain_mark_stack, &g1_keep_alive,
executor); &g1_drain_mark_stack,
executor,
g1h->gc_timer_cm());
g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack // The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the // oop closures will set the has_overflown flag if we overflow the
@ -3227,6 +3237,9 @@ void ConcurrentMark::abort() {
satb_mq_set.set_active_all_threads( satb_mq_set.set_active_all_threads(
false, /* new active value */ false, /* new active value */
satb_mq_set.is_active() /* expected_active */); satb_mq_set.is_active() /* expected_active */);
_g1h->trace_heap_after_concurrent_cycle();
_g1h->register_concurrent_cycle_end();
} }
static void print_ms_time_info(const char* prefix, const char* name, static void print_ms_time_info(const char* prefix, const char* name,
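As in the CMS change, process_discovered_references now returns a ReferenceProcessorStats value that the caller forwards to the GC tracer, instead of the processor logging internally. A minimal sketch of that return-the-stats shape, with assumed RefStats/Tracer types and made-up counts:

#include <cstddef>
#include <cstdio>

struct RefStats {                  // counts per reference kind (assumed fields)
  std::size_t soft = 0;
  std::size_t weak = 0;
  std::size_t phantom = 0;
};

struct Tracer {
  void report_gc_reference_stats(const RefStats& s) {
    std::printf("soft=%zu weak=%zu phantom=%zu\n", s.soft, s.weak, s.phantom);
  }
};

// Stands in for ReferenceProcessor::process_discovered_references: do the
// work, then hand the counts back instead of logging them here.
RefStats process_discovered_references(/* closures, executor, timer */) {
  RefStats stats;
  stats.weak = 42;                 // pretend 42 weak references were cleared
  return stats;
}

int main() {
  Tracer tracer;
  RefStats stats = process_discovered_references();
  tracer.report_gc_reference_stats(stats);  // caller reports, as in the diff
}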
View File
@ -569,8 +569,6 @@ protected:
void clear_has_overflown() { _has_overflown = false; } void clear_has_overflown() { _has_overflown = false; }
bool restart_for_overflow() { return _restart_for_overflow; } bool restart_for_overflow() { return _restart_for_overflow; }
bool has_aborted() { return _has_aborted; }
// Methods to enter the two overflow sync barriers // Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(uint worker_id); void enter_first_sync_barrier(uint worker_id);
void enter_second_sync_barrier(uint worker_id); void enter_second_sync_barrier(uint worker_id);
@ -821,6 +819,8 @@ public:
// Called to abort the marking cycle after a Full GC takes place. // Called to abort the marking cycle after a Full GC takes place.
void abort(); void abort();
bool has_aborted() { return _has_aborted; }
// This prints the global/local fingers. It is used for debugging. // This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();) NOT_PRODUCT(void print_finger();)
View File
@ -93,7 +93,6 @@ void ConcurrentMarkThread::run() {
ResourceMark rm; ResourceMark rm;
HandleMark hm; HandleMark hm;
double cycle_start = os::elapsedVTime(); double cycle_start = os::elapsedVTime();
char verbose_str[128];
// We have to ensure that we finish scanning the root regions // We have to ensure that we finish scanning the root regions
// before the next GC takes place. To ensure this we have to // before the next GC takes place. To ensure this we have to
@ -155,8 +154,7 @@ void ConcurrentMarkThread::run() {
} }
CMCheckpointRootsFinalClosure final_cl(_cm); CMCheckpointRootsFinalClosure final_cl(_cm);
sprintf(verbose_str, "GC remark"); VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
VMThread::execute(&op); VMThread::execute(&op);
} }
if (cm()->restart_for_overflow()) { if (cm()->restart_for_overflow()) {
@ -187,8 +185,7 @@ void ConcurrentMarkThread::run() {
} }
CMCleanUp cl_cl(_cm); CMCleanUp cl_cl(_cm);
sprintf(verbose_str, "GC cleanup"); VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
VMThread::execute(&op); VMThread::execute(&op);
} else { } else {
// We don't want to update the marking status if a GC pause // We don't want to update the marking status if a GC pause
@ -292,6 +289,7 @@ void ConcurrentMarkThread::run() {
// called System.gc() with +ExplicitGCInvokesConcurrent). // called System.gc() with +ExplicitGCInvokesConcurrent).
_sts.join(); _sts.join();
g1h->increment_old_marking_cycles_completed(true /* concurrent */); g1h->increment_old_marking_cycles_completed(true /* concurrent */);
g1h->register_concurrent_cycle_end();
_sts.leave(); _sts.leave();
} }
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");
View File
@ -0,0 +1,81 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
#include "memory/allocation.hpp"
class EvacuationInfo : public StackObj {
uint _collectionset_regions;
uint _allocation_regions;
size_t _collectionset_used_before;
size_t _collectionset_used_after;
size_t _alloc_regions_used_before;
size_t _bytes_copied;
uint _regions_freed;
public:
EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0),
_collectionset_used_after(0), _alloc_regions_used_before(0),
_bytes_copied(0), _regions_freed(0) { }
void set_collectionset_regions(uint collectionset_regions) {
_collectionset_regions = collectionset_regions;
}
void set_allocation_regions(uint allocation_regions) {
_allocation_regions = allocation_regions;
}
void set_collectionset_used_before(size_t used) {
_collectionset_used_before = used;
}
void increment_collectionset_used_after(size_t used) {
_collectionset_used_after += used;
}
void set_alloc_regions_used_before(size_t used) {
_alloc_regions_used_before = used;
}
void set_bytes_copied(size_t copied) {
_bytes_copied = copied;
}
void set_regions_freed(uint freed) {
_regions_freed += freed;
}
uint collectionset_regions() { return _collectionset_regions; }
uint allocation_regions() { return _allocation_regions; }
size_t collectionset_used_before() { return _collectionset_used_before; }
size_t collectionset_used_after() { return _collectionset_used_after; }
size_t alloc_regions_used_before() { return _alloc_regions_used_before; }
size_t bytes_copied() { return _bytes_copied; }
uint regions_freed() { return _regions_freed; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
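EvacuationInfo above is a plain value object filled in piecemeal across a pause and read back by the tracer afterwards. A hedged usage sketch follows, using a trimmed copy of the class (StackObj and the uint typedef removed, all numbers invented):

#include <cstddef>
#include <cstdio>

class EvacuationInfo {             // trimmed copy of the class above
  unsigned _collectionset_regions = 0;
  std::size_t _collectionset_used_before = 0;
  std::size_t _collectionset_used_after = 0;
  std::size_t _bytes_copied = 0;
 public:
  void set_collectionset_regions(unsigned n)              { _collectionset_regions = n; }
  void set_collectionset_used_before(std::size_t b)       { _collectionset_used_before = b; }
  void increment_collectionset_used_after(std::size_t b)  { _collectionset_used_after += b; }
  void set_bytes_copied(std::size_t b)                    { _bytes_copied = b; }
  std::size_t collectionset_used_after() const            { return _collectionset_used_after; }
};

int main() {
  EvacuationInfo info;                              // stack value, one per pause
  info.set_collectionset_regions(12);               // chosen CSet size
  info.set_collectionset_used_before(12u * 1024 * 1024);
  info.increment_collectionset_used_after(3u * 1024 * 1024);  // survivors kept
  info.set_bytes_copied(2u * 1024 * 1024);          // evacuated bytes
  std::printf("CSet used after pause: %zu bytes\n", info.collectionset_used_after());
}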
View File
@ -38,10 +38,15 @@
#include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp" #include "memory/gcLocker.inline.hpp"
#include "memory/genOopClosures.inline.hpp" #include "memory/genOopClosures.inline.hpp"
@ -76,7 +81,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// The number of GC workers is passed to heap_region_par_iterate_chunked(). // The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task. // It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() -> // G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots() which calls eventuall to // SharedHeap::process_strong_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone. SharedHeap::process_strong_roots() also // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap). // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
@ -457,7 +462,7 @@ bool G1CollectedHeap::is_in_partial_collection(const void* p) {
#endif #endif
// Returns true if the reference points to an object that // Returns true if the reference points to an object that
// can move in an incremental collecction. // can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) { bool G1CollectedHeap::is_scavengable(const void* p) {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy(); G1CollectorPolicy* g1p = g1h->g1_policy();
@ -548,7 +553,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
return res; return res;
} }
// Wait here until we get notifed either when (a) there are no // Wait here until we get notified either when (a) there are no
// more free regions coming or (b) some regions have been moved on // more free regions coming or (b) some regions have been moved on
// the secondary_free_list. // the secondary_free_list.
SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
@ -623,7 +628,7 @@ uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
uint first = G1_NULL_HRS_INDEX; uint first = G1_NULL_HRS_INDEX;
if (num_regions == 1) { if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower // Only one region to allocate, no need to go through the slower
// path. The caller will attempt the expasion if this fails, so // path. The caller will attempt the expansion if this fails, so
// let's not try to expand here too. // let's not try to expand here too.
HeapRegion* hr = new_region(word_size, false /* do_expand */); HeapRegion* hr = new_region(word_size, false /* do_expand */);
if (hr != NULL) { if (hr != NULL) {
@ -688,7 +693,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
// the first region. // the first region.
HeapWord* new_obj = first_hr->bottom(); HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that // This will be the new end of the first region in the series that
// should also match the end of the last region in the seriers. // should also match the end of the last region in the series.
HeapWord* new_end = new_obj + word_size_sum; HeapWord* new_end = new_obj + word_size_sum;
// This will be the new top of the first region that will reflect // This will be the new top of the first region that will reflect
// this allocation. // this allocation.
@ -863,7 +868,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
bool* gc_overhead_limit_was_exceeded) { bool* gc_overhead_limit_was_exceeded) {
assert_heap_not_locked_and_not_at_safepoint(); assert_heap_not_locked_and_not_at_safepoint();
// Loop until the allocation is satisified, or unsatisfied after GC. // Loop until the allocation is satisfied, or unsatisfied after GC.
for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
unsigned int gc_count_before; unsigned int gc_count_before;
@ -1003,7 +1008,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
(*gclocker_retry_count_ret) += 1; (*gclocker_retry_count_ret) += 1;
} }
// We can reach here if we were unsuccessul in scheduling a // We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were // collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the // stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully // allocation attempt in case another thread successfully
@ -1128,7 +1133,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
(*gclocker_retry_count_ret) += 1; (*gclocker_retry_count_ret) += 1;
} }
// We can reach here if we were unsuccessul in scheduling a // We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were // collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the // stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully // allocation attempt in case another thread successfully
@ -1298,10 +1303,17 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
return false; return false;
} }
STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
gc_timer->register_gc_start(os::elapsed_counter());
SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
SvcGCMarker sgcm(SvcGCMarker::FULL); SvcGCMarker sgcm(SvcGCMarker::FULL);
ResourceMark rm; ResourceMark rm;
print_heap_before_gc(); print_heap_before_gc();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
@ -1322,7 +1334,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{ {
TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@ -1351,7 +1363,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
verify_before_gc(); verify_before_gc();
pre_full_gc_dump(); pre_full_gc_dump(gc_timer);
COMPILER2_PRESENT(DerivedPointerTable::clear()); COMPILER2_PRESENT(DerivedPointerTable::clear());
@ -1433,7 +1445,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
reset_gc_time_stamp(); reset_gc_time_stamp();
// Since everything potentially moved, we will clear all remembered // Since everything potentially moved, we will clear all remembered
// sets, and clear all cards. Later we will rebuild remebered // sets, and clear all cards. Later we will rebuild remembered
// sets. We will also reset the GC time stamps of the regions. // sets. We will also reset the GC time stamps of the regions.
clear_rsets_post_compaction(); clear_rsets_post_compaction();
check_gc_time_stamps(); check_gc_time_stamps();
@ -1553,8 +1565,12 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
} }
print_heap_after_gc(); print_heap_after_gc();
trace_heap_after_gc(gc_tracer);
post_full_gc_dump(); post_full_gc_dump(gc_timer);
gc_timer->register_gc_end(os::elapsed_counter());
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
} }
return true; return true;
@ -1919,7 +1935,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_ref_processor_stw(NULL), _ref_processor_stw(NULL),
_process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
_bot_shared(NULL), _bot_shared(NULL),
_evac_failure_scan_stack(NULL) , _evac_failure_scan_stack(NULL),
_mark_in_progress(false), _mark_in_progress(false),
_cg1r(NULL), _summary_bytes_used(0), _cg1r(NULL), _summary_bytes_used(0),
_g1mm(NULL), _g1mm(NULL),
@ -1939,12 +1955,18 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_surviving_young_words(NULL), _surviving_young_words(NULL),
_old_marking_cycles_started(0), _old_marking_cycles_started(0),
_old_marking_cycles_completed(0), _old_marking_cycles_completed(0),
_concurrent_cycle_started(false),
_in_cset_fast_test(NULL), _in_cset_fast_test(NULL),
_in_cset_fast_test_base(NULL), _in_cset_fast_test_base(NULL),
_dirty_cards_region_list(NULL), _dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL), _worker_cset_start_region(NULL),
_worker_cset_start_region_time_stamp(NULL) { _worker_cset_start_region_time_stamp(NULL),
_g1h = this; // To catch bugs. _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
_g1h = this;
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation."); vm_exit_during_initialization("Failed necessary allocation.");
} }
@ -1959,13 +1981,14 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (int i = 0; i < n_queues; i++) { for (int i = 0; i < n_queues; i++) {
RefToScanQueue* q = new RefToScanQueue(); RefToScanQueue* q = new RefToScanQueue();
q->initialize(); q->initialize();
_task_queues->register_queue(i, q); _task_queues->register_queue(i, q);
::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
} }
clear_cset_start_regions(); clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags. // Initialize the G1EvacuationFailureALot counters and flags.
@ -2025,7 +2048,7 @@ jint G1CollectedHeap::initialize() {
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
// It is important to do this in a way such that concurrent readers can't // It is important to do this in a way such that concurrent readers can't
// temporarily think somethings in the heap. (I've actually seen this // temporarily think something is in the heap. (I've actually seen this
// happen in asserts: DLD.) // happen in asserts: DLD.)
_reserved.set_word_size(0); _reserved.set_word_size(0);
_reserved.set_start((HeapWord*)heap_rs.base()); _reserved.set_start((HeapWord*)heap_rs.base());
@ -2462,7 +2485,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
// We need to clear the "in_progress" flag in the CM thread before // We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent // we wake up any waiters (especially when ExplicitInvokesConcurrent
// is set) so that if a waiter requests another System.gc() it doesn't // is set) so that if a waiter requests another System.gc() it doesn't
// incorrectly see that a marking cyle is still in progress. // incorrectly see that a marking cycle is still in progress.
if (concurrent) { if (concurrent) {
_cmThread->clear_in_progress(); _cmThread->clear_in_progress();
} }
@ -2474,6 +2497,49 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
FullGCCount_lock->notify_all(); FullGCCount_lock->notify_all();
} }
void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
_concurrent_cycle_started = true;
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
trace_heap_before_gc(_gc_tracer_cm);
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (_concurrent_cycle_started) {
_gc_timer_cm->register_gc_end(os::elapsed_counter());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
_concurrent_cycle_started = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (_concurrent_cycle_started) {
trace_heap_after_gc(_gc_tracer_cm);
}
}
G1YCType G1CollectedHeap::yc_type() {
bool is_young = g1_policy()->gcs_are_young();
bool is_initial_mark = g1_policy()->during_initial_mark_pause();
bool is_during_mark = mark_in_progress();
if (is_initial_mark) {
return InitialMark;
} else if (is_during_mark) {
return DuringMark;
} else if (is_young) {
return Normal;
} else {
return Mixed;
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) { void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked(); assert_heap_not_locked();
@ -2676,13 +2742,13 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
break; break;
} }
// Noone should have claimed it directly. We can given // No one should have claimed it directly. We can given
// that we claimed its "starts humongous" region. // that we claimed its "starts humongous" region.
assert(chr->claim_value() != claim_value, "sanity"); assert(chr->claim_value() != claim_value, "sanity");
assert(chr->humongous_start_region() == r, "sanity"); assert(chr->humongous_start_region() == r, "sanity");
if (chr->claimHeapRegion(claim_value)) { if (chr->claimHeapRegion(claim_value)) {
// we should always be able to claim it; noone else should // we should always be able to claim it; no one else should
// be trying to claim this region // be trying to claim this region
bool res2 = cl->doHeapRegion(chr); bool res2 = cl->doHeapRegion(chr);
@ -2976,7 +3042,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
// the min TLAB size. // the min TLAB size.
// Also, this value can be at most the humongous object threshold, // Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accomodate // since we can't allow tlabs to grow big enough to accommodate
// humongous objects. // humongous objects.
HeapRegion* hr = _mutator_alloc_region.get(); HeapRegion* hr = _mutator_alloc_region.get();
@ -3743,10 +3809,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return false; return false;
} }
_gc_timer_stw->register_gc_start(os::elapsed_counter());
_gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
SvcGCMarker sgcm(SvcGCMarker::MINOR); SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm; ResourceMark rm;
print_heap_before_gc(); print_heap_before_gc();
trace_heap_before_gc(_gc_tracer_stw);
HRSPhaseSetter x(HRSPhaseEvacuation); HRSPhaseSetter x(HRSPhaseEvacuation);
verify_region_sets_optional(); verify_region_sets_optional();
@ -3771,11 +3842,17 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Inner scope for scope based logging, timers, and stats collection // Inner scope for scope based logging, timers, and stats collection
{ {
EvacuationInfo evacuation_info;
if (g1_policy()->during_initial_mark_pause()) { if (g1_policy()->during_initial_mark_pause()) {
// We are about to start a marking cycle, so we increment the // We are about to start a marking cycle, so we increment the
// full collection counter. // full collection counter.
increment_old_marking_cycles_started(); increment_old_marking_cycles_started();
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
} }
_gc_tracer_stw->report_yc_type(yc_type());
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ? int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
@ -3885,7 +3962,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE #endif // YOUNG_LIST_VERBOSE
g1_policy()->finalize_cset(target_pause_time_ms); g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
_cm->note_start_of_gc(); _cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that // We should not verify the per-thread SATB buffers given that
@ -3921,10 +3998,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
setup_surviving_young_words(); setup_surviving_young_words();
// Initialize the GC alloc regions. // Initialize the GC alloc regions.
init_gc_alloc_regions(); init_gc_alloc_regions(evacuation_info);
// Actually do the work... // Actually do the work...
evacuate_collection_set(); evacuate_collection_set(evacuation_info);
// We do this to mainly verify the per-thread SATB buffers // We do this to mainly verify the per-thread SATB buffers
// (which have been filtered by now) since we didn't verify // (which have been filtered by now) since we didn't verify
@ -3936,7 +4013,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
true /* verify_thread_buffers */, true /* verify_thread_buffers */,
true /* verify_fingers */); true /* verify_fingers */);
free_collection_set(g1_policy()->collection_set()); free_collection_set(g1_policy()->collection_set(), evacuation_info);
g1_policy()->clear_collection_set(); g1_policy()->clear_collection_set();
cleanup_surviving_young_words(); cleanup_surviving_young_words();
@ -3964,13 +4041,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
#endif // YOUNG_LIST_VERBOSE #endif // YOUNG_LIST_VERBOSE
g1_policy()->record_survivor_regions(_young_list->survivor_length(), g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(), _young_list->first_survivor_region(),
_young_list->last_survivor_region()); _young_list->last_survivor_region());
_young_list->reset_auxilary_lists(); _young_list->reset_auxilary_lists();
if (evacuation_failed()) { if (evacuation_failed()) {
_summary_bytes_used = recalculate_used(); _summary_bytes_used = recalculate_used();
uint n_queues = MAX2((int)ParallelGCThreads, 1);
for (uint i = 0; i < n_queues; i++) {
if (_evacuation_failed_info_array[i].has_failed()) {
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
}
}
} else { } else {
// The "used" of the collection set have already been subtracted // The "used" of the collection set have already been subtracted
// when they were freed. Add in the bytes evacuated. // when they were freed. Add in the bytes evacuated.
@ -4013,7 +4096,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
} }
} }
// We redo the verificaiton but now wrt to the new CSet which // We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed. // has just got initialized after the previous CSet was freed.
_cm->verify_no_cset_oops(true /* verify_stacks */, _cm->verify_no_cset_oops(true /* verify_stacks */,
true /* verify_enqueued_buffers */, true /* verify_enqueued_buffers */,
@ -4026,7 +4109,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// investigate this in CR 7178365. // investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime(); double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
g1_policy()->record_collection_pause_end(pause_time_ms); g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
MemoryService::track_memory_usage(); MemoryService::track_memory_usage();
@ -4093,14 +4176,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc(); print_heap_after_gc();
trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level // We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised. // before any GC notifications are raised.
g1mm()->update_sizes(); g1mm()->update_sizes();
}
_gc_tracer_stw->report_evacuation_info(&evacuation_info);
_gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
_gc_timer_stw->register_gc_end(os::elapsed_counter());
_gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
}
// It should now be safe to tell the concurrent mark thread to start // It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output // without its logging output interfering with the logging output
// that came from the pause. // that came from the pause.
@ -4152,7 +4240,7 @@ void G1CollectedHeap::release_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "post-condition"); assert(_mutator_alloc_region.get() == NULL, "post-condition");
} }
void G1CollectedHeap::init_gc_alloc_regions() { void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */); assert_at_safepoint(true /* should_be_vm_thread */);
_survivor_gc_alloc_region.init(); _survivor_gc_alloc_region.init();
@ -4167,7 +4255,7 @@ void G1CollectedHeap::init_gc_alloc_regions() {
// a cleanup and it should be on the free list now), or // a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied // d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but // during a cleanup and was added to the free list, but
// has been subseqently used to allocate a humongous // has been subsequently used to allocate a humongous
// object that may be less than the region size). // object that may be less than the region size).
if (retained_region != NULL && if (retained_region != NULL &&
!retained_region->in_collection_set() && !retained_region->in_collection_set() &&
@ -4184,10 +4272,13 @@ void G1CollectedHeap::init_gc_alloc_regions() {
retained_region->note_start_of_copying(during_im); retained_region->note_start_of_copying(during_im);
_old_gc_alloc_region.set(retained_region); _old_gc_alloc_region.set(retained_region);
_hr_printer.reuse(retained_region); _hr_printer.reuse(retained_region);
evacuation_info.set_alloc_regions_used_before(retained_region->used());
} }
} }
void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) { void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
_old_gc_alloc_region.count());
_survivor_gc_alloc_region.release(); _survivor_gc_alloc_region.release();
// If we have an old GC alloc region to release, we'll save it in // If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't // _retained_old_gc_alloc_region. If we don't
@ -4270,7 +4361,7 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
} }
oop oop
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
oop old) { oop old) {
assert(obj_in_cs(old), assert(obj_in_cs(old),
err_msg("obj: "PTR_FORMAT" should still be in the CSet", err_msg("obj: "PTR_FORMAT" should still be in the CSet",
@ -4279,7 +4370,12 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
oop forward_ptr = old->forward_to_atomic(old); oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) { if (forward_ptr == NULL) {
// Forward-to-self succeeded. // Forward-to-self succeeded.
assert(_par_scan_state != NULL, "par scan state");
OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
uint queue_num = _par_scan_state->queue_num();
_evacuation_failed = true;
_evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
if (_evac_failure_closure != cl) { if (_evac_failure_closure != cl) {
MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
assert(!_drain_in_progress, assert(!_drain_in_progress,
@ -4310,8 +4406,6 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
} }
void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
set_evacuation_failed(true);
preserve_mark_if_necessary(old, m); preserve_mark_if_necessary(old, m);
HeapRegion* r = heap_region_containing(old); HeapRegion* r = heap_region_containing(old);
@ -4561,8 +4655,7 @@ oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
if (obj_ptr == NULL) { if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has // This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer. // installed a forwarding pointer.
OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); return _g1->handle_evacuation_failure_par(_par_scan_state, old);
return _g1->handle_evacuation_failure_par(cl, old);
} }
oop obj = oop(obj_ptr); oop obj = oop(obj_ptr);
@ -5166,7 +5259,7 @@ public:
// will be copied, the reference field set to point to the // will be copied, the reference field set to point to the
// new location, and the RSet updated. Otherwise we need to // new location, and the RSet updated. Otherwise we need to
// use the non-heap or metadata closures directly to copy // use the non-heap or metadata closures directly to copy
// the refernt object and update the pointer, while avoiding // the referent object and update the pointer, while avoiding
// updating the RSet. // updating the RSet.
if (_g1h->is_in_g1_reserved(p)) { if (_g1h->is_in_g1_reserved(p)) {
@ -5334,7 +5427,7 @@ public:
} }
}; };
// Driver routine for parallel reference enqueing. // Driver routine for parallel reference enqueueing.
// Creates an instance of the ref enqueueing gang // Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it. // task and has the worker threads execute it.
@ -5463,7 +5556,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// processor would have seen that the reference object had already // processor would have seen that the reference object had already
// been 'discovered' and would have skipped discovering the reference, // been 'discovered' and would have skipped discovering the reference,
// but would not have treated the reference object as a regular oop. // but would not have treated the reference object as a regular oop.
// As a reult the copy closure would not have been applied to the // As a result the copy closure would not have been applied to the
// referent object. // referent object.
// //
// We need to explicitly copy these referent objects - the references // We need to explicitly copy these referent objects - the references
@ -5539,21 +5632,28 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// Setup the soft refs policy... // Setup the soft refs policy...
rp->setup_policy(false); rp->setup_policy(false);
ReferenceProcessorStats stats;
if (!rp->processing_is_mt()) { if (!rp->processing_is_mt()) {
// Serial reference processing... // Serial reference processing...
rp->process_discovered_references(&is_alive, stats = rp->process_discovered_references(&is_alive,
&keep_alive, &keep_alive,
&drain_queue, &drain_queue,
NULL); NULL,
_gc_timer_stw);
} else { } else {
// Parallel reference processing // Parallel reference processing
assert(rp->num_q() == no_of_gc_workers, "sanity"); assert(rp->num_q() == no_of_gc_workers, "sanity");
assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor); stats = rp->process_discovered_references(&is_alive,
&keep_alive,
&drain_queue,
&par_task_executor,
_gc_timer_stw);
} }
_gc_tracer_stw->report_gc_reference_stats(stats);
// We have completed copying any necessary live referent objects // We have completed copying any necessary live referent objects
// (that were not copied during the actual pause) so we can // (that were not copied during the actual pause) so we can
// retire any active alloc buffers // retire any active alloc buffers
@ -5577,7 +5677,7 @@ void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
// Serial reference processing... // Serial reference processing...
rp->enqueue_discovered_references(); rp->enqueue_discovered_references();
} else { } else {
// Parallel reference enqueuing // Parallel reference enqueueing
assert(no_of_gc_workers == workers()->active_workers(), assert(no_of_gc_workers == workers()->active_workers(),
"Need to reset active workers"); "Need to reset active workers");
@ -5594,15 +5694,15 @@ void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
// FIXME // FIXME
// CM's reference processing also cleans up the string and symbol tables. // CM's reference processing also cleans up the string and symbol tables.
// Should we do that here also? We could, but it is a serial operation // Should we do that here also? We could, but it is a serial operation
// and could signicantly increase the pause time. // and could significantly increase the pause time.
double ref_enq_time = os::elapsedTime() - ref_enq_start; double ref_enq_time = os::elapsedTime() - ref_enq_start;
g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0); g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
} }
void G1CollectedHeap::evacuate_collection_set() { void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
_expand_heap_after_alloc_failure = true; _expand_heap_after_alloc_failure = true;
set_evacuation_failed(false); _evacuation_failed = false;
// Should G1EvacuationFailureALot be in effect for this GC? // Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();) NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
@ -5691,7 +5791,7 @@ void G1CollectedHeap::evacuate_collection_set() {
JNIHandles::weak_oops_do(&is_alive, &keep_alive); JNIHandles::weak_oops_do(&is_alive, &keep_alive);
} }
release_gc_alloc_regions(n_workers); release_gc_alloc_regions(n_workers, evacuation_info);
g1_rem_set()->cleanup_after_oops_into_collection_set_do(); g1_rem_set()->cleanup_after_oops_into_collection_set_do();
// Reset and re-enable the hot card cache. // Reset and re-enable the hot card cache.
@ -5714,7 +5814,7 @@ void G1CollectedHeap::evacuate_collection_set() {
// Enqueue any remaining references remaining on the STW // Enqueue any remaining references remaining on the STW
// reference processor's discovered lists. We need to do // reference processor's discovered lists. We need to do
// this after the card table is cleaned (and verified) as // this after the card table is cleaned (and verified) as
// the act of enqueuing entries on to the pending list // the act of enqueueing entries on to the pending list
// will log these updates (and dirty their associated // will log these updates (and dirty their associated
// cards). We need these updates logged to update any // cards). We need these updates logged to update any
// RSets. // RSets.
@ -5942,7 +6042,7 @@ void G1CollectedHeap::cleanUpCardTable() {
g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0); g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
} }
void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
size_t pre_used = 0; size_t pre_used = 0;
FreeRegionList local_free_list("Local List for CSet Freeing"); FreeRegionList local_free_list("Local List for CSet Freeing");
@ -6028,10 +6128,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
cur->set_evacuation_failed(false); cur->set_evacuation_failed(false);
// The region is now considered to be old. // The region is now considered to be old.
_old_set.add(cur); _old_set.add(cur);
evacuation_info.increment_collectionset_used_after(cur->used());
} }
cur = next; cur = next;
} }
evacuation_info.set_regions_freed(local_free_list.length());
policy->record_max_rs_lengths(rs_lengths); policy->record_max_rs_lengths(rs_lengths);
policy->cset_regions_freed(); policy->cset_regions_freed();
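Taken together, these hunks thread a per-pause EvacuationInfo object through collection-set selection, evacuation, and freeing. A hypothetical end-to-end flow for one pause (the construction site is outside this excerpt, so its exact form is an assumption):

    EvacuationInfo evacuation_info;  // assumed: created once per evacuation pause
    g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);  // records CSet region count
    evacuate_collection_set(evacuation_info);            // records GC alloc region stats
    free_collection_set(cs_head, evacuation_info);       // cs_head: CSet head, as in the signature above
    g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);  // used-before, bytes copied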
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,10 +26,12 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp" #include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp" #include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp" #include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp" #include "gc_implementation/shared/hSpaceCounters.hpp"
@ -61,7 +63,12 @@ class HeapRegionRemSetIterator;
class ConcurrentMark; class ConcurrentMark;
class ConcurrentMarkThread; class ConcurrentMarkThread;
class ConcurrentG1Refine; class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters; class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue; typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@ -160,7 +167,7 @@ public:
// An instance is embedded into the G1CH and used as the // An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW // (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during // reference processor. It is also extensively used during
// refence processing during STW evacuation pauses. // reference processing during STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure { class G1STWIsAliveClosure: public BoolObjectClosure {
G1CollectedHeap* _g1; G1CollectedHeap* _g1;
public: public:
@ -323,10 +330,10 @@ private:
void release_mutator_alloc_region(); void release_mutator_alloc_region();
// It initializes the GC alloc regions at the start of a GC. // It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(); void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC. // It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers); void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
// It does any cleanup that needs to be done on the GC alloc regions // It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC. // before a Full GC.
@ -389,6 +396,8 @@ private:
// concurrent cycles) we have completed. // concurrent cycles) we have completed.
volatile unsigned int _old_marking_cycles_completed; volatile unsigned int _old_marking_cycles_completed;
bool _concurrent_cycle_started;
// This is a non-product method that is helpful for testing. It is // This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by // called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very // allocating a number of dead regions. This way we can induce very
@ -734,6 +743,12 @@ public:
return _old_marking_cycles_completed; return _old_marking_cycles_completed;
} }
void register_concurrent_cycle_start(jlong start_time);
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();
G1YCType yc_type();
G1HRPrinter* hr_printer() { return &_hr_printer; } G1HRPrinter* hr_printer() { return &_hr_printer; }
protected: protected:
@ -769,7 +784,7 @@ protected:
bool do_collection_pause_at_safepoint(double target_pause_time_ms); bool do_collection_pause_at_safepoint(double target_pause_time_ms);
// Actually do the work of evacuating the collection set. // Actually do the work of evacuating the collection set.
void evacuate_collection_set(); void evacuate_collection_set(EvacuationInfo& evacuation_info);
// The g1 remembered set of the heap. // The g1 remembered set of the heap.
G1RemSet* _g1_rem_set; G1RemSet* _g1_rem_set;
@ -794,7 +809,7 @@ protected:
// After a collection pause, make the regions in the CS into free // After a collection pause, make the regions in the CS into free
// regions. // regions.
void free_collection_set(HeapRegion* cs_head); void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
// Abandon the current collection set without recording policy // Abandon the current collection set without recording policy
// statistics or updating free lists. // statistics or updating free lists.
@ -863,9 +878,7 @@ protected:
// True iff a evacuation has failed in the current collection. // True iff a evacuation has failed in the current collection.
bool _evacuation_failed; bool _evacuation_failed;
// Set the attribute indicating whether evacuation has failed in the EvacuationFailedInfo* _evacuation_failed_info_array;
// current collection.
void set_evacuation_failed(bool b) { _evacuation_failed = b; }
// Failed evacuations cause some logical from-space objects to have // Failed evacuations cause some logical from-space objects to have
// forwarding pointers to themselves. Reset them. // forwarding pointers to themselves. Reset them.
@ -907,7 +920,7 @@ protected:
void finalize_for_evac_failure(); void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps. // An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m); void handle_evacuation_failure_common(oop obj, markOop m);
#ifndef PRODUCT #ifndef PRODUCT
@ -939,13 +952,13 @@ protected:
inline bool evacuation_should_fail(); inline bool evacuation_should_fail();
// Reset the G1EvacuationFailureALot counters. Should be called at // Reset the G1EvacuationFailureALot counters. Should be called at
// the end of an evacuation pause in which an evacuation failure ocurred. // the end of an evacuation pause in which an evacuation failure occurred.
inline void reset_evacuation_should_fail(); inline void reset_evacuation_should_fail();
#endif // !PRODUCT #endif // !PRODUCT
// ("Weak") Reference processing support. // ("Weak") Reference processing support.
// //
// G1 has 2 instances of the referece processor class. One // G1 has 2 instances of the reference processor class. One
// (_ref_processor_cm) handles reference object discovery // (_ref_processor_cm) handles reference object discovery
// and subsequent processing during concurrent marking cycles. // and subsequent processing during concurrent marking cycles.
// //
@ -995,6 +1008,12 @@ protected:
// The (stw) reference processor... // The (stw) reference processor...
ReferenceProcessor* _ref_processor_stw; ReferenceProcessor* _ref_processor_stw;
STWGCTimer* _gc_timer_stw;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
G1NewTracer* _gc_tracer_stw;
// During reference object discovery, the _is_alive_non_header // During reference object discovery, the _is_alive_non_header
// closure (if non-null) is applied to the referent object to // closure (if non-null) is applied to the referent object to
// determine whether the referent is live. If so then the // determine whether the referent is live. If so then the
@ -1140,9 +1159,12 @@ public:
// The STW reference processor.... // The STW reference processor....
ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; } ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
// The Concurent Marking reference processor... // The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
virtual size_t capacity() const; virtual size_t capacity() const;
virtual size_t used() const; virtual size_t used() const;
// This should be called when we're not holding the heap lock. The // This should be called when we're not holding the heap lock. The
@ -1200,7 +1222,7 @@ public:
// verify_region_sets_optional() is planted in the code for // verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in // list verification in non-product builds (and it can be enabled in
// product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1). // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY #if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { void verify_region_sets_optional() {
verify_region_sets(); verify_region_sets();
@ -1266,7 +1288,7 @@ public:
// The same as above but assume that the caller holds the Heap_lock. // The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause); void collect_locked(GCCause::Cause cause);
// True iff a evacuation has failed in the most-recent collection. // True iff an evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; } bool evacuation_failed() { return _evacuation_failed; }
// It will free a region if it has allocated objects in it that are // It will free a region if it has allocated objects in it that are
@ -1554,6 +1576,7 @@ public:
// Override; it uses the "prev" marking information // Override; it uses the "prev" marking information
virtual void verify(bool silent); virtual void verify(bool silent);
virtual void print_on(outputStream* st) const; virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const; virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const; virtual void print_on_error(outputStream* st) const;
@ -1839,7 +1862,7 @@ protected:
G1ParScanHeapEvacClosure* _evac_cl; G1ParScanHeapEvacClosure* _evac_cl;
G1ParScanPartialArrayClosure* _partial_scan_cl; G1ParScanPartialArrayClosure* _partial_scan_cl;
int _hash_seed; int _hash_seed;
uint _queue_num; uint _queue_num;
size_t _term_attempts; size_t _term_attempts;
View File
@ -909,7 +909,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero // Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001 #define MIN_TIMER_GRANULARITY 0.0000001
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) { void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
double end_time_sec = os::elapsedTime(); double end_time_sec = os::elapsedTime();
assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
"otherwise, the subtraction below does not make sense"); "otherwise, the subtraction below does not make sense");
@ -941,6 +941,9 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
end_time_sec, false); end_time_sec, false);
evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
if (update_stats) { if (update_stats) {
_trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times()); _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
// this is where we update the allocation rate of the application // this is where we update the allocation rate of the application
@ -1896,7 +1899,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
} }
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) { void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
double young_start_time_sec = os::elapsedTime(); double young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list(); YoungList* young_list = _g1->young_list();
@ -2102,6 +2105,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
double non_young_end_time_sec = os::elapsedTime(); double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
evacuation_info.set_collectionset_regions(cset_region_length());
} }
void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
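In sum, the policy now publishes three collection-set metrics into the pause's EvacuationInfo (consolidated from the hunks above; the ordering across phases is inferred):

    // finalize_cset(): how many regions were chosen for this pause
    evacuation_info.set_collectionset_regions(cset_region_length());
    // record_collection_pause_end(): occupancy going in, bytes actually evacuated
    evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
    evacuation_info.set_bytes_copied(_bytes_copied_during_gc);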
View File
@ -671,7 +671,7 @@ public:
// Record the start and end of an evacuation pause. // Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec); void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms); void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
// Record the start and end of a full collection. // Record the start and end of a full collection.
void record_full_collection_start(); void record_full_collection_start();
@ -720,7 +720,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being // Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of // "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods. // the collection set are available via access methods.
void finalize_cset(double target_pause_time_ms); void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set.
@ -879,6 +879,7 @@ private:
ageTable _survivors_age_table; ageTable _survivors_age_table;
public: public:
uint tenuring_threshold() const { return _tenuring_threshold; }
inline GCAllocPurpose inline GCAllocPurpose
evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) { evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
View File
@ -38,7 +38,7 @@ class WorkerDataArray : public CHeapObj<mtGC> {
NOT_PRODUCT(static const T _uninitialized;) NOT_PRODUCT(static const T _uninitialized;)
// We are caching the sum and average to only have to calculate them once. // We are caching the sum and average to only have to calculate them once.
// This is not done in an MT-safe way. It is intetened to allow single // This is not done in an MT-safe way. It is intended to allow single
// threaded code to call sum() and average() multiple times in any order // threaded code to call sum() and average() multiple times in any order
// without having to worry about the cost. // without having to worry about the cost.
bool _has_new_data; bool _has_new_data;
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,10 @@
#include "code/icBuffer.hpp" #include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/gcLocker.hpp" #include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp" #include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp" #include "memory/modRefBarrierSet.hpp"
@ -119,7 +123,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) { bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them // Recursively traverse all live objects and mark them
TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty); GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace(" 1"); GenMarkSweep::trace(" 1");
SharedHeap* sh = SharedHeap::heap(); SharedHeap* sh = SharedHeap::heap();
@ -139,10 +143,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity"); assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
rp->setup_policy(clear_all_softrefs); rp->setup_policy(clear_all_softrefs);
rp->process_discovered_references(&GenMarkSweep::is_alive, const ReferenceProcessorStats& stats =
&GenMarkSweep::keep_alive, rp->process_discovered_references(&GenMarkSweep::is_alive,
&GenMarkSweep::follow_stack_closure, &GenMarkSweep::keep_alive,
NULL); &GenMarkSweep::follow_stack_closure,
NULL,
gc_timer());
gc_tracer()->report_gc_reference_stats(stats);
// This is the point where the entire marking should have completed. // This is the point where the entire marking should have completed.
@ -185,6 +192,8 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
gclog_or_tty->print_cr("]"); gclog_or_tty->print_cr("]");
} }
} }
gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
} }
class G1PrepareCompactClosure: public HeapRegionClosure { class G1PrepareCompactClosure: public HeapRegionClosure {
@ -257,7 +266,7 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty); GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("2"); GenMarkSweep::trace("2");
// find the first region // find the first region
@ -294,7 +303,7 @@ void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations // Adjust the pointers to reflect the new locations
TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty); GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("3"); GenMarkSweep::trace("3");
SharedHeap* sh = SharedHeap::heap(); SharedHeap* sh = SharedHeap::heap();
@ -353,7 +362,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen. // to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty); GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("4"); GenMarkSweep::trace("4");
G1SpaceCompactClosure blk; G1SpaceCompactClosure blk;
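The phase markers switch from the log-only TraceTime to GCTraceTime, which additionally registers the phase with a GCTimer so it appears in the trace output; passing a NULL timer (as the parallel-old tasks further below do) keeps the old log-only behavior. A minimal sketch of the scoped-timer pattern, using the names from this file:

    {
      // Logs "phase 1" when the flag condition holds, and records the
      // phase on gc_timer() for the GC tracer as well.
      GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
      GenMarkSweep::trace(" 1");
      // ... marking work ...
    }  // tm's destructor closes the phase on the timer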
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,9 @@ class G1MarkSweep : AllStatic {
static void invoke_at_safepoint(ReferenceProcessor* rp, static void invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs); bool clear_all_softrefs);
static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
private: private:
// Mark live objects // Mark live objects
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -224,6 +224,7 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {
// Monitoring support used by // Monitoring support used by
// MemoryService // MemoryService
// jstat counters // jstat counters
// Tracing
size_t overall_reserved() { return _overall_reserved; } size_t overall_reserved() { return _overall_reserved; }
size_t overall_committed() { return _overall_committed; } size_t overall_committed() { return _overall_committed; }
View File
@ -0,0 +1,51 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
#include "utilities/debug.hpp"
enum G1YCType {
Normal,
InitialMark,
DuringMark,
Mixed,
G1YCTypeEndSentinel
};
class G1YCTypeHelper {
public:
static const char* to_string(G1YCType type) {
switch(type) {
case Normal: return "Normal";
case InitialMark: return "Initial Mark";
case DuringMark: return "During Mark";
case Mixed: return "Mixed";
default: ShouldNotReachHere(); return NULL;
}
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
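A usage sketch for the new helper (hypothetical call site; any logging sink works):

    // Map the young-collection type to a human-readable label for trace output.
    G1YCType type = Mixed;
    const char* label = G1YCTypeHelper::to_string(type);  // yields "Mixed"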
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
@ -227,7 +229,7 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
void VM_CGC_Operation::doit() { void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty); GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
SharedHeap* sh = SharedHeap::heap(); SharedHeap* sh = SharedHeap::heap();
// This could go away if CollectedHeap gave access to _gc_is_active... // This could go away if CollectedHeap gave access to _gc_is_active...
if (sh != NULL) { if (sh != NULL) {
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,11 @@
#include "gc_implementation/shared/adaptiveSizePolicy.hpp" #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp" #include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp" #include "memory/genCollectedHeap.hpp"
@ -75,7 +80,6 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
work_queue_set_, &term_), work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this), _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure), _keep_alive_closure(&_scan_weak_ref_closure),
_promotion_failure_size(0),
_strong_roots_time(0.0), _term_time(0.0) _strong_roots_time(0.0), _term_time(0.0)
{ {
#if TASKQUEUE_STATS #if TASKQUEUE_STATS
@ -279,13 +283,10 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
} }
} }
void ParScanThreadState::print_and_clear_promotion_failure_size() { void ParScanThreadState::print_promotion_failure_size() {
if (_promotion_failure_size != 0) { if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
if (PrintPromotionFailure) { gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", _thread_num, _promotion_failed_info.first_size());
_thread_num, _promotion_failure_size);
}
_promotion_failure_size = 0;
} }
} }
@ -305,6 +306,7 @@ public:
inline ParScanThreadState& thread_state(int i); inline ParScanThreadState& thread_state(int i);
void trace_promotion_failed(YoungGCTracer& gc_tracer);
void reset(int active_workers, bool promotion_failed); void reset(int active_workers, bool promotion_failed);
void flush(); void flush();
@ -353,13 +355,21 @@ inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
return ((ParScanThreadState*)_data)[i]; return ((ParScanThreadState*)_data)[i];
} }
void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
for (int i = 0; i < length(); ++i) {
if (thread_state(i).promotion_failed()) {
gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
thread_state(i).promotion_failed_info().reset();
}
}
}
void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{ {
_term.reset_for_reuse(active_threads); _term.reset_for_reuse(active_threads);
if (promotion_failed) { if (promotion_failed) {
for (int i = 0; i < length(); ++i) { for (int i = 0; i < length(); ++i) {
thread_state(i).print_and_clear_promotion_failure_size(); thread_state(i).print_promotion_failure_size();
} }
} }
} }
@ -583,14 +593,6 @@ void ParNewGenTask::set_for_termination(int active_workers) {
gch->set_n_termination(active_workers); gch->set_n_termination(active_workers);
} }
// The "i" passed to this method is the part of the work for
// this thread. It is not the worker ID. The "i" is derived
// from _started_workers which is incremented in internal_note_start()
// called in GangWorker loop() and which is called under the
// which is called under the protection of the gang monitor and is
// called after a task is started. So "i" is based on
// first-come-first-served.
void ParNewGenTask::work(uint worker_id) { void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource // Since this is being done in a separate thread, need new resource
@ -876,16 +878,45 @@ void EvacuateFollowersClosureGeneral::do_void() {
} }
// A Generation that does parallel young-gen collection.
bool ParNewGeneration::_avoid_promotion_undo = false; bool ParNewGeneration::_avoid_promotion_undo = false;
// A Generation that does parallel young-gen collection. void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
remove_forwarding_pointers();
if (PrintGCDetails) {
gclog_or_tty->print(" (promotion failed)");
}
// All the spaces are in play for mark-sweep.
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
// Trace promotion failure in the parallel GC threads
thread_state_set.trace_promotion_failed(gc_tracer);
// Single threaded code may have reported promotion failure to the global state
if (_promotion_failed_info.has_failed()) {
gc_tracer.report_promotion_failed(_promotion_failed_info);
}
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
void ParNewGeneration::collect(bool full, void ParNewGeneration::collect(bool full,
bool clear_all_soft_refs, bool clear_all_soft_refs,
size_t size, size_t size,
bool is_tlab) { bool is_tlab) {
assert(full || size > 0, "otherwise we don't want to collect"); assert(full || size > 0, "otherwise we don't want to collect");
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
_gc_timer->register_gc_start(os::elapsed_counter());
assert(gch->kind() == CollectedHeap::GenCollectedHeap, assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a CMS generational heap"); "not a CMS generational heap");
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
@ -906,7 +937,7 @@ void ParNewGeneration::collect(bool full,
set_avoid_promotion_undo(true); set_avoid_promotion_undo(true);
} }
// If the next generation is too full to accomodate worst-case promotion // If the next generation is too full to accommodate worst-case promotion
// from this generation, pass on collection; let the next generation // from this generation, pass on collection; let the next generation
// do it. // do it.
if (!collection_attempt_is_safe()) { if (!collection_attempt_is_safe()) {
@ -915,6 +946,10 @@ void ParNewGeneration::collect(bool full,
} }
assert(to()->is_empty(), "Else not collection_attempt_is_safe"); assert(to()->is_empty(), "Else not collection_attempt_is_safe");
ParNewTracer gc_tracer;
gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
gch->trace_heap_before_gc(&gc_tracer);
init_assuming_no_promotion_failure(); init_assuming_no_promotion_failure();
if (UseAdaptiveSizePolicy) { if (UseAdaptiveSizePolicy) {
@ -922,7 +957,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin(); size_policy->minor_collection_begin();
} }
TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
// Capture heap used before collection (for printing). // Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used(); size_t gch_prev_used = gch->used();
@ -975,17 +1010,21 @@ void ParNewGeneration::collect(bool full,
rp->setup_policy(clear_all_soft_refs); rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)? // Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers); rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) { if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
rp->process_discovered_references(&is_alive, &keep_alive, stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, &task_executor); &evacuate_followers, &task_executor,
_gc_timer);
} else { } else {
thread_state_set.flush(); thread_state_set.flush();
gch->set_par_threads(0); // 0 ==> non-parallel. gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks(); gch->save_marks();
rp->process_discovered_references(&is_alive, &keep_alive, stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, NULL); &evacuate_followers, NULL,
_gc_timer);
} }
gc_tracer.report_gc_reference_stats(stats);
if (!promotion_failed()) { if (!promotion_failed()) {
// Swap the survivor spaces. // Swap the survivor spaces.
eden()->clear(SpaceDecorator::Mangle); eden()->clear(SpaceDecorator::Mangle);
@ -1010,22 +1049,7 @@ void ParNewGeneration::collect(bool full,
adjust_desired_tenuring_threshold(); adjust_desired_tenuring_threshold();
} else { } else {
assert(_promo_failure_scan_stack.is_empty(), "post condition"); handle_promotion_failed(gch, thread_state_set, gc_tracer);
_promo_failure_scan_stack.clear(true); // Clear cached segments.
remove_forwarding_pointers();
if (PrintGCDetails) {
gclog_or_tty->print(" (promotion failed)");
}
// All the spaces are in play for mark-sweep.
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
} }
// set new iteration safe limit for the survivor spaces // set new iteration safe limit for the survivor spaces
from()->set_concurrent_iteration_safe_limit(from()->top()); from()->set_concurrent_iteration_safe_limit(from()->top());
@ -1065,6 +1089,13 @@ void ParNewGeneration::collect(bool full,
rp->enqueue_discovered_references(NULL); rp->enqueue_discovered_references(NULL);
} }
rp->verify_no_references_recorded(); rp->verify_no_references_recorded();
gch->trace_heap_after_gc(&gc_tracer);
gc_tracer.report_tenuring_threshold(tenuring_threshold());
_gc_timer->register_gc_end(os::elapsed_counter());
gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
} }
static int sum; static int sum;
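Bracketed this way, every ParNew collection produces a complete trace event. The lifecycle, consolidated from the hunks above:

    _gc_timer->register_gc_start(os::elapsed_counter());           // raw timestamp first
    ParNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
    gch->trace_heap_before_gc(&gc_tracer);                         // heap summary "before"
    // ... evacuation, reference processing (stats reported above) ...
    gch->trace_heap_after_gc(&gc_tracer);                          // heap summary "after"
    gc_tracer.report_tenuring_threshold(tenuring_threshold());
    _gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());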
@ -1174,8 +1205,7 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
new_obj = old; new_obj = old;
preserve_mark_if_necessary(old, m); preserve_mark_if_necessary(old, m);
// Log the size of the maiden promotion failure par_scan_state->register_promotion_failure(sz);
par_scan_state->log_promotion_failure(sz);
} }
old->forward_to(new_obj); old->forward_to(new_obj);
@ -1300,8 +1330,7 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
failed_to_promote = true; failed_to_promote = true;
preserve_mark_if_necessary(old, m); preserve_mark_if_necessary(old, m);
// Log the size of the maiden promotion failure par_scan_state->register_promotion_failure(sz);
par_scan_state->log_promotion_failure(sz);
} }
} else { } else {
// Is in to-space; do copying ourselves. // Is in to-space; do copying ourselves.
@ -1599,8 +1628,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
} }
#undef BUSY #undef BUSY
void ParNewGeneration::ref_processor_init() void ParNewGeneration::ref_processor_init() {
{
if (_ref_processor == NULL) { if (_ref_processor == NULL) {
// Allocate and initialize a reference processor // Allocate and initialize a reference processor
_ref_processor = _ref_processor =
View File
@ -25,7 +25,9 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp" #include "memory/defNewGeneration.hpp"
#include "utilities/taskqueue.hpp" #include "utilities/taskqueue.hpp"
@ -105,7 +107,7 @@ class ParScanThreadState {
#endif // TASKQUEUE_STATS #endif // TASKQUEUE_STATS
// Stats for promotion failure // Stats for promotion failure
size_t _promotion_failure_size; PromotionFailedInfo _promotion_failed_info;
// Timing numbers. // Timing numbers.
double _start; double _start;
@ -180,13 +182,16 @@ class ParScanThreadState {
void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz); void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
// Promotion failure stats // Promotion failure stats
size_t promotion_failure_size() { return promotion_failure_size(); } void register_promotion_failure(size_t sz) {
void log_promotion_failure(size_t sz) { _promotion_failed_info.register_copy_failure(sz);
if (_promotion_failure_size == 0) {
_promotion_failure_size = sz;
}
} }
void print_and_clear_promotion_failure_size(); PromotionFailedInfo& promotion_failed_info() {
return _promotion_failed_info;
}
bool promotion_failed() {
return _promotion_failed_info.has_failed();
}
void print_promotion_failure_size();
#if TASKQUEUE_STATS #if TASKQUEUE_STATS
TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; } TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
@ -337,6 +342,8 @@ class ParNewGeneration: public DefNewGeneration {
// word being overwritten with a self-forwarding-pointer. // word being overwritten with a self-forwarding-pointer.
void preserve_mark_if_necessary(oop obj, markOop m); void preserve_mark_if_necessary(oop obj, markOop m);
void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
protected: protected:
bool _survivor_overflow; bool _survivor_overflow;
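The per-thread failure bookkeeping moves from a single size_t to a PromotionFailedInfo record. A sketch of the two halves of the protocol (copy path and post-pause reporting), using only names from these hunks:

    // During copying, on a failed promotion of an object of size sz:
    par_scan_state->register_promotion_failure(sz);   // delegates to register_copy_failure(sz)

    // After the pause, per worker (see trace_promotion_failed above):
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();  // ready for the next pause
    }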
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,8 @@
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp" #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp" #include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
@ -642,6 +644,29 @@ void ParallelScavengeHeap::prepare_for_verify() {
ensure_parsability(false); // no need to retire TLABs for verification ensure_parsability(false); // no need to retire TLABs for verification
} }
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
PSOldGen* old = old_gen();
HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
PSYoungGen* young = young_gen();
VirtualSpaceSummary young_summary(young->reserved().start(),
(HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
MutableSpace* eden = young_gen()->eden_space();
SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
MutableSpace* from = young_gen()->from_space();
SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
MutableSpace* to = young_gen()->to_space();
SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}
void ParallelScavengeHeap::print_on(outputStream* st) const { void ParallelScavengeHeap::print_on(outputStream* st) const {
young_gen()->print_on(st); young_gen()->print_on(st);
old_gen()->print_on(st); old_gen()->print_on(st);
@ -706,6 +731,12 @@ void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
} }
} }
void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
const PSHeapSummary& heap_summary = create_ps_heap_summary();
const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}
ParallelScavengeHeap* ParallelScavengeHeap::heap() { ParallelScavengeHeap* ParallelScavengeHeap::heap() {
assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()"); assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap"); assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
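create_ps_heap_summary() snapshots each generation as a (start, committed end, reserved end) span plus per-space used bytes; trace_heap() then forwards that snapshot, together with a metaspace summary, to the tracer. A sketch of the reporting call (GCWhen::BeforeGC is assumed to be one of the gcWhen.hpp enum values):

    const PSHeapSummary& heap_summary = create_ps_heap_summary();
    const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
    gc_tracer->report_gc_heap_summary(GCWhen::BeforeGC, heap_summary, metaspace_summary);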
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,14 +30,18 @@
#include "gc_implementation/parallelScavenge/psOldGen.hpp" #include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp" #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp" #include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp" #include "utilities/ostream.hpp"
class AdjoiningGenerations; class AdjoiningGenerations;
class CollectorPolicy;
class GCHeapSummary;
class GCTaskManager; class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer; class GenerationSizer;
class CollectorPolicy; class CollectorPolicy;
class PSAdaptiveSizePolicy;
class PSHeapSummary;
class ParallelScavengeHeap : public CollectedHeap { class ParallelScavengeHeap : public CollectedHeap {
friend class VMStructs; friend class VMStructs;
@ -65,6 +69,8 @@ class ParallelScavengeHeap : public CollectedHeap {
static GCTaskManager* _gc_task_manager; // The task manager. static GCTaskManager* _gc_task_manager; // The task manager.
void trace_heap(GCWhen::Type when, GCTracer* tracer);
protected: protected:
static inline size_t total_invocations(); static inline size_t total_invocations();
HeapWord* allocate_new_tlab(size_t size); HeapWord* allocate_new_tlab(size_t size);
@ -219,6 +225,7 @@ class ParallelScavengeHeap : public CollectedHeap {
jlong millis_since_last_gc(); jlong millis_since_last_gc();
void prepare_for_verify(); void prepare_for_verify();
PSHeapSummary create_ps_heap_summary();
virtual void print_on(outputStream* st) const; virtual void print_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const; virtual void print_on_error(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const; virtual void print_gc_threads_on(outputStream* st) const;
View File
@ -27,6 +27,8 @@
#include "code/codeCache.hpp" #include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp" #include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.hpp"
#include "memory/universe.hpp" #include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp" #include "oops/objArrayKlass.inline.hpp"
@ -48,8 +50,8 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
ResourceMark rm; ResourceMark rm;
NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask", NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
@ -77,8 +79,8 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) { void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc"); assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(TraceTime tm("MarkFromRootsTask", NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -148,8 +150,8 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{ {
assert(Universe::heap()->is_gc_active(), "called outside gc"); assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(TraceTime tm("RefProcTask", NOT_PRODUCT(GCTraceTime tm("RefProcTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@ -204,8 +206,8 @@ StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
void StealMarkingTask::do_it(GCTaskManager* manager, uint which) { void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc"); assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(TraceTime tm("StealMarkingTask", NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
@ -237,8 +239,8 @@ StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) { void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc"); assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask", NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
@ -304,8 +306,8 @@ UpdateDensePrefixTask::UpdateDensePrefixTask(
void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) { void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask", NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
@ -319,8 +321,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) { void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc"); assert(Universe::heap()->is_gc_active(), "called outside gc");
NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask", NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty)); PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
View File
@@ -34,6 +34,10 @@
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
@@ -108,8 +112,12 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   }
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+  GCCause::Cause gc_cause = heap->gc_cause();
+  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   // The scope of casr should end after code that can change
@@ -131,6 +139,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
   heap->print_heap_before_gc();
+  heap->trace_heap_before_gc(_gc_tracer);
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -147,7 +156,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     old_gen->verify_object_start_array();
   }
-  heap->pre_full_gc_dump();
+  heap->pre_full_gc_dump(_gc_timer);
   // Filled in below to track the state of the young gen after the collection.
   bool eden_empty;
@@ -159,7 +168,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@@ -374,13 +383,18 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
   heap->print_heap_after_gc();
+  heap->trace_heap_after_gc(_gc_tracer);
-  heap->post_full_gc_dump();
+  heap->post_full_gc_dump(_gc_timer);
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+  _gc_timer->register_gc_end(os::elapsed_counter());
+  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
   return true;
 }
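
The invoke_no_policy hunks bracket the whole collection: register the start tick on the timer and report the start event to the tracer on entry, then register the end tick and report the end event (with the recorded phase partitions) on exit. A hedged, self-contained sketch of that bracketing follows; Timer, Tracer, and elapsed_counter are invented stand-ins for HotSpot's GCTimer, GCTracer, and os::elapsed_counter.

// Hypothetical sketch of the start/end bracketing added to invoke_no_policy.
#include <cstdint>
#include <cstdio>

struct Timer {
  int64_t start_ticks = 0, end_ticks = 0;
  void register_gc_start(int64_t t) { start_ticks = t; }
  void register_gc_end(int64_t t)   { end_ticks = t; }
};

struct Tracer {
  void report_gc_start(const char* cause, int64_t t) {
    std::printf("GC start (%s) at tick %lld\n", cause, (long long)t);
  }
  void report_gc_end(int64_t t) {
    std::printf("GC end at tick %lld\n", (long long)t);
  }
};

// Stand-in for a monotonic tick counter such as os::elapsed_counter().
static int64_t elapsed_counter() { static int64_t t = 0; return ++t; }

bool collect(Timer* timer, Tracer* tracer, const char* cause) {
  timer->register_gc_start(elapsed_counter());   // record the start tick first
  tracer->report_gc_start(cause, timer->start_ticks);
  // ... the actual mark/compact phases would run here ...
  timer->register_gc_end(elapsed_counter());     // record the end tick
  tracer->report_gc_end(timer->end_ticks);       // then emit the end event
  return true;
}

int main() { Timer t; Tracer tr; return collect(&t, &tr, "System.gc()") ? 0 : 1; }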
@@ -498,7 +512,7 @@ void PSMarkSweep::deallocate_stacks() {
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
   trace(" 1");
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -531,8 +545,10 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Process reference objects found during marking
   {
     ref_processor()->setup_policy(clear_all_softrefs);
-    ref_processor()->process_discovered_references(
-      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
+    const ReferenceProcessorStats& stats =
+      ref_processor()->process_discovered_references(
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
+    gc_tracer()->report_gc_reference_stats(stats);
   }
   // This is the point where the entire marking should have completed.
@@ -552,11 +568,12 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
+  _gc_tracer->report_object_count_after_gc(is_alive_closure());
 }
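
In the phase 1 hunk, process_discovered_references now returns a ReferenceProcessorStats object instead of void, and the caller forwards it to the tracer. A minimal sketch of that capture-then-report flow, with invented stand-in types rather than HotSpot's real ones:

// Hypothetical sketch of the stats flow added in mark_sweep_phase1.
#include <cstddef>
#include <cstdio>

struct ReferenceStats {
  std::size_t soft_count;
  std::size_t weak_count;
  std::size_t final_count;
  std::size_t phantom_count;
};

struct RefProcessor {
  // Placeholder: a real processor would walk the discovered-reference lists;
  // the counts returned here are dummies.
  ReferenceStats process_discovered_references() {
    return ReferenceStats{1, 2, 0, 3};
  }
};

struct Tracer {
  void report_gc_reference_stats(const ReferenceStats& s) {
    std::printf("soft=%zu weak=%zu final=%zu phantom=%zu\n",
                s.soft_count, s.weak_count, s.final_count, s.phantom_count);
  }
};

int main() {
  RefProcessor rp;
  Tracer tracer;
  // Mirrors the diff: capture the returned stats, then hand them to the tracer.
  const ReferenceStats& stats = rp.process_discovered_references();
  tracer.report_gc_reference_stats(stats);
  return 0;
}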
 void PSMarkSweep::mark_sweep_phase2() {
-  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
   trace("2");
   // Now all live objects are marked, compute the new object addresses.
@@ -586,7 +603,7 @@ static PSAlwaysTrueClosure always_true;
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
   trace("3");
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -629,7 +646,7 @@ void PSMarkSweep::mark_sweep_phase3() {
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
   trace("4");
   // All pointers are now adjusted, move objects accordingly
