diff --git a/.hgtags b/.hgtags
index 6178c6775a6..982132f3729 100644
--- a/.hgtags
+++ b/.hgtags
@@ -208,3 +208,5 @@ bcebd3fdefc91abb9d7fa0c5af6211b3f8720da6 jdk8-b83
d7ad0dfaa41151bd3a9ae46725b0aec3730a9cd0 jdk8-b84
1872c12529090e1c1dbf567f02ad7ae6231b8f0c jdk8-b85
da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
+5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
+e517701a4d0e25ae9c7945bca6e1762a8c5d8aa6 jdk8-b88
diff --git a/.hgtags-top-repo b/.hgtags-top-repo
index ab78580039e..b639be15de4 100644
--- a/.hgtags-top-repo
+++ b/.hgtags-top-repo
@@ -208,3 +208,5 @@ fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
01f631f89fa392b4e484d0812c40ea8f9d2353aa jdk8-b84
7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85
df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
+b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
+e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
diff --git a/common/autoconf/configure.ac b/common/autoconf/configure.ac
index 5e632c4ecb8..fcaf12fc422 100644
--- a/common/autoconf/configure.ac
+++ b/common/autoconf/configure.ac
@@ -166,6 +166,7 @@ PLATFORM_SETUP_OPENJDK_TARGET_ENDIANNESS
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_LIBS
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_JDK
+TOOLCHAIN_SETUP_COMPILER_FLAGS_MISC
# Setup debug symbols (need objcopy from the toolchain for that)
JDKOPT_SETUP_DEBUG_SYMBOLS
diff --git a/common/autoconf/generated-configure.sh b/common/autoconf/generated-configure.sh
index ece735837c6..832b6410016 100644
--- a/common/autoconf/generated-configure.sh
+++ b/common/autoconf/generated-configure.sh
@@ -639,6 +639,8 @@ CXXFLAGS_DEBUG_SYMBOLS
CFLAGS_DEBUG_SYMBOLS
ZIP_DEBUGINFO_FILES
ENABLE_DEBUG_SYMBOLS
+COMPILER_SUPPORTS_TARGET_BITS_FLAG
+ZERO_ARCHFLAG
LDFLAGS_CXX_JDK
LDFLAGS_JDKEXE_SUFFIX
LDFLAGS_JDKLIB_SUFFIX
@@ -679,6 +681,7 @@ LIBRARY_PREFIX
STATIC_LIBRARY
SHARED_LIBRARY
OBJ_SUFFIX
+COMPILER_NAME
LIPO
ac_ct_OBJDUMP
OBJDUMP
@@ -835,7 +838,6 @@ SYS_ROOT
PATH_SEP
SRC_ROOT
ZERO_ARCHDEF
-ZERO_ARCHFLAG
DEFINE_CROSS_COMPILE_ARCH
LP64
OPENJDK_TARGET_OS_API_DIR
@@ -3714,6 +3716,15 @@ fi
+
+# TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE],
+# [RUN-IF-FALSE])
+# ------------------------------------------------------------
+# Check that the C and C++ compilers support an argument
+
+
+
+
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -3764,7 +3775,7 @@ fi
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1365493306
+DATE_WHEN_GENERATED=1367502949
###############################################################################
#
@@ -7099,17 +7110,6 @@ $as_echo "$COMPILE_TYPE" >&6; }
fi
- # Some Zero and Shark settings.
- # ZERO_ARCHFLAG tells the compiler which mode to build for
- case "${OPENJDK_TARGET_CPU}" in
- s390)
- ZERO_ARCHFLAG="-m31"
- ;;
- *)
- ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
- esac
-
-
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
@@ -28344,6 +28344,7 @@ done
# (The JVM can use 32 or 64 bit Java pointers but that decision
# is made at runtime.)
#
+
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Always specify -m flags on Solaris
@@ -28805,6 +28806,7 @@ fi
+
# The (cross) compiler is now configured, we can now test capabilities
# of the target platform.
@@ -29268,6 +29270,161 @@ esac
+
+ # Some Zero and Shark settings.
+ # ZERO_ARCHFLAG tells the compiler which mode to build for
+ case "${OPENJDK_TARGET_CPU}" in
+ s390)
+ ZERO_ARCHFLAG="-m31"
+ ;;
+ *)
+ ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
+ esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$ZERO_ARCHFLAG\"" >&5
+$as_echo_n "checking if compiler supports \"$ZERO_ARCHFLAG\"... " >&6; }
+ supports=yes
+
+ saved_cflags="$CFLAGS"
+ CFLAGS="$CFLAGS $ZERO_ARCHFLAG"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ int i;
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ supports=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CFLAGS="$saved_cflags"
+
+ saved_cxxflags="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $ZERO_ARCHFLAG"
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ int i;
+
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ supports=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CXXFLAGS="$saved_cxxflags"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5
+$as_echo "$supports" >&6; }
+ if test "x$supports" = "xyes" ; then
+ :
+ else
+ ZERO_ARCHFLAG=""
+ fi
+
+
+
+ # Check that the compiler supports -mX flags
+ # Set COMPILER_SUPPORTS_TARGET_BITS_FLAG to 'true' if it does
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"-m${OPENJDK_TARGET_CPU_BITS}\"" >&5
+$as_echo_n "checking if compiler supports \"-m${OPENJDK_TARGET_CPU_BITS}\"... " >&6; }
+ supports=yes
+
+ saved_cflags="$CFLAGS"
+ CFLAGS="$CFLAGS -m${OPENJDK_TARGET_CPU_BITS}"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ int i;
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ supports=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CFLAGS="$saved_cflags"
+
+ saved_cxxflags="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS -m${OPENJDK_TARGET_CPU_BITS}"
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ int i;
+
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ supports=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CXXFLAGS="$saved_cxxflags"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5
+$as_echo "$supports" >&6; }
+ if test "x$supports" = "xyes" ; then
+ COMPILER_SUPPORTS_TARGET_BITS_FLAG=true
+ else
+ COMPILER_SUPPORTS_TARGET_BITS_FLAG=false
+ fi
+
+
+
+
# Setup debug symbols (need objcopy from the toolchain for that)
#
@@ -29446,7 +29603,6 @@ fi
-
###############################################################################
#
# Check for X Windows
diff --git a/common/autoconf/platform.m4 b/common/autoconf/platform.m4
index 067650688c3..889b99ed727 100644
--- a/common/autoconf/platform.m4
+++ b/common/autoconf/platform.m4
@@ -333,17 +333,6 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS],
fi
AC_SUBST(DEFINE_CROSS_COMPILE_ARCH)
- # Some Zero and Shark settings.
- # ZERO_ARCHFLAG tells the compiler which mode to build for
- case "${OPENJDK_TARGET_CPU}" in
- s390)
- ZERO_ARCHFLAG="-m31"
- ;;
- *)
- ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
- esac
- AC_SUBST(ZERO_ARCHFLAG)
-
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
@@ -444,6 +433,7 @@ AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_BITS],
# (The JVM can use 32 or 64 bit Java pointers but that decision
# is made at runtime.)
#
+
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Always specify -m flags on Solaris
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
diff --git a/common/autoconf/spec.gmk.in b/common/autoconf/spec.gmk.in
index 92b0c83017e..2172ee8ef68 100644
--- a/common/autoconf/spec.gmk.in
+++ b/common/autoconf/spec.gmk.in
@@ -300,6 +300,9 @@ MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
# CC is gcc and others behaving reasonably similar.
# CL is cl.exe only.
COMPILER_TYPE:=@COMPILER_TYPE@
+COMPILER_NAME:=@COMPILER_NAME@
+
+COMPILER_SUPPORTS_TARGET_BITS_FLAG=@COMPILER_SUPPORTS_TARGET_BITS_FLAG@
CC_OUT_OPTION:=@CC_OUT_OPTION@
EXE_OUT_OPTION:=@EXE_OUT_OPTION@
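The new COMPILER_SUPPORTS_TARGET_BITS_FLAG setting records whether the configured compiler accepts the -m32/-m64 style word-size flag, so downstream makefiles can add it conditionally. A minimal shell sketch of that decision, assuming the configure-substituted values are exported into the environment (the snippet is illustrative, not part of the patch):

  # Only pass the word-size flag when configure verified the compiler accepts it.
  if [ "${COMPILER_SUPPORTS_TARGET_BITS_FLAG}" = "true" ]; then
    CFLAGS="${CFLAGS} -m${OPENJDK_TARGET_CPU_BITS}"
    CXXFLAGS="${CXXFLAGS} -m${OPENJDK_TARGET_CPU_BITS}"
  fi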
diff --git a/common/autoconf/toolchain.m4 b/common/autoconf/toolchain.m4
index 893fb16d674..779f86117ab 100644
--- a/common/autoconf/toolchain.m4
+++ b/common/autoconf/toolchain.m4
@@ -573,6 +573,7 @@ else
fi
fi
+AC_SUBST(COMPILER_NAME)
AC_SUBST(OBJ_SUFFIX)
AC_SUBST(SHARED_LIBRARY)
AC_SUBST(STATIC_LIBRARY)
@@ -1030,3 +1031,61 @@ AC_SUBST(LDFLAGS_JDKLIB_SUFFIX)
AC_SUBST(LDFLAGS_JDKEXE_SUFFIX)
AC_SUBST(LDFLAGS_CXX_JDK)
])
+
+
+# TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE],
+# [RUN-IF-FALSE])
+# ------------------------------------------------------------
+# Check that the C and C++ compilers support an argument
+AC_DEFUN([TOOLCHAIN_COMPILER_CHECK_ARGUMENTS],
+[
+ AC_MSG_CHECKING([if compiler supports "$1"])
+ supports=yes
+
+ saved_cflags="$CFLAGS"
+ CFLAGS="$CFLAGS $1"
+ AC_LANG_PUSH([C])
+ AC_COMPILE_IFELSE([
+ AC_LANG_SOURCE([[int i;]])
+ ], [], [supports=no])
+ AC_LANG_POP([C])
+ CFLAGS="$saved_cflags"
+
+ saved_cxxflags="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $1"
+ AC_LANG_PUSH([C++])
+ AC_COMPILE_IFELSE([
+ AC_LANG_SOURCE([[int i;]])
+ ], [], [supports=no])
+ AC_LANG_POP([C++])
+ CXXFLAGS="$saved_cxxflags"
+
+ AC_MSG_RESULT([$supports])
+ if test "x$supports" = "xyes" ; then
+ m4_ifval([$2], [$2], [:])
+ else
+ m4_ifval([$3], [$3], [:])
+ fi
+])
+
+AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_MISC],
+[
+ # Some Zero and Shark settings.
+ # ZERO_ARCHFLAG tells the compiler which mode to build for
+ case "${OPENJDK_TARGET_CPU}" in
+ s390)
+ ZERO_ARCHFLAG="-m31"
+ ;;
+ *)
+ ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
+ esac
+ TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([$ZERO_ARCHFLAG], [], [ZERO_ARCHFLAG=""])
+ AC_SUBST(ZERO_ARCHFLAG)
+
+ # Check that the compiler supports -mX flags
+ # Set COMPILER_SUPPORTS_TARGET_BITS_FLAG to 'true' if it does
+ TOOLCHAIN_COMPILER_CHECK_ARGUMENTS([-m${OPENJDK_TARGET_CPU_BITS}],
+ [COMPILER_SUPPORTS_TARGET_BITS_FLAG=true],
+ [COMPILER_SUPPORTS_TARGET_BITS_FLAG=false])
+ AC_SUBST(COMPILER_SUPPORTS_TARGET_BITS_FLAG)
+])
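TOOLCHAIN_COMPILER_CHECK_ARGUMENTS probes a candidate flag by compiling a trivial translation unit with both the C and the C++ compiler and reporting failure if either rejects it. A standalone shell sketch of the same technique, assuming CC/CXX point at the toolchain under test and using a hypothetical flag (this mirrors the macro's behaviour; it is not code from the patch):

  # Probe whether both compilers accept a flag by compiling 'int i;' with it.
  flag="-m${OPENJDK_TARGET_CPU_BITS:-64}"
  supports=yes
  echo 'int i;' > conftest.c
  ${CC:-cc}   -c "$flag" conftest.c   -o conftest.o 2>/dev/null || supports=no
  echo 'int i;' > conftest.cpp
  ${CXX:-c++} -c "$flag" conftest.cpp -o conftest.o 2>/dev/null || supports=no
  rm -f conftest.c conftest.cpp conftest.o
  echo "compiler supports $flag: $supports"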
diff --git a/common/makefiles/NativeCompilation.gmk b/common/makefiles/NativeCompilation.gmk
index 838a62ce111..2e084ea410c 100644
--- a/common/makefiles/NativeCompilation.gmk
+++ b/common/makefiles/NativeCompilation.gmk
@@ -95,10 +95,28 @@ define add_native_source
$$($1_$2_OBJ) : $2
ifeq ($(COMPILER_TYPE),CC)
$$(call COMPILING_MSG,$2,$$($1_TARGET))
+ # The Sun Studio compiler doesn't output the full path to the object file in the
+ # generated deps files. Fixing it with sed. If compiling assembly, don't try this.
+ ifeq ($(COMPILER_NAME)$$(filter %.s,$2),ossc)
+ $$($1_$2_COMP) $$($1_$2_FLAGS) $$($1_$2_DEP_FLAG) $$($1_$2_DEP).tmp $(CC_OUT_OPTION)$$($1_$2_OBJ) $2
+ $(SED) 's|^$$(@F):|$$@:|' $$($1_$2_DEP).tmp > $$($1_$2_DEP)
+ else
$$($1_$2_COMP) $$($1_$2_FLAGS) $$($1_$2_DEP_FLAG) $$($1_$2_DEP) $(CC_OUT_OPTION)$$($1_$2_OBJ) $2
endif
+ endif
+ # The Visual Studio compiler lacks a feature for generating make dependencies, but by
+ # setting -showIncludes, all included files are printed. These are filtered out and
+ # parsed into make dependencies.
ifeq ($(COMPILER_TYPE),CL)
- $$($1_$2_COMP) $$($1_$2_FLAGS) $$($1_$2_DEBUG_OUT_FLAGS) $(CC_OUT_OPTION)$$($1_$2_OBJ) $2
+ $$($1_$2_COMP) $$($1_$2_FLAGS) -showIncludes $$($1_$2_DEBUG_OUT_FLAGS) $(CC_OUT_OPTION)$$($1_$2_OBJ) $2 | $(TEE) $$($1_$2_DEP).raw | $(GREP) -v "^Note: including file:"
+ ($(ECHO) $$@: \\ \
+ && $(SED) -e '/^Note: including file:/!d' \
+ -e 's|Note: including file: *||' \
+ -e 's|\\|/|g' \
+ -e 's|^\([a-zA-Z]\):|/cygdrive/\1|g' \
+ -e '/$(subst /,\/,$(TOPDIR))/!d' \
+ -e 's|$$$$| \\|g' \
+ $$($1_$2_DEP).raw) > $$($1_$2_DEP)
endif
endif
endef
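The -showIncludes post-processing above turns cl.exe's "Note: including file:" output into a make dependency list: drive letters become /cygdrive paths, backslashes become slashes, headers outside TOPDIR are dropped, and each line gets a trailing backslash continuation. A rough shell illustration of that sed pipeline on one made-up header path (the TOPDIR filter is omitted here for brevity):

  # Example line as printed by cl.exe -showIncludes:
  #   Note: including file:  c:\jdk\hotspot\src\share\vm\utilities\macros.hpp
  printf 'Note: including file:  c:\\jdk\\hotspot\\foo.hpp\n' |
    sed -e '/^Note: including file:/!d' \
        -e 's|Note: including file: *||' \
        -e 's|\\|/|g' \
        -e 's|^\([a-zA-Z]\):|/cygdrive/\1|g' \
        -e 's|$| \\|'
  # prints: /cygdrive/c/jdk/hotspot/foo.hpp \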
@@ -393,6 +411,8 @@ define SetupNativeCompilation
$1_EXTRA_LDFLAGS+="-implib:$$($1_OBJECT_DIR)/$$($1_LIBRARY).lib"
endif
+ $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+
ifneq (,$$($1_DEBUG_SYMBOLS))
ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
ifeq ($(OPENJDK_TARGET_OS), windows)
@@ -531,6 +551,8 @@ define SetupNativeCompilation
endif
endif
+ $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+
$$($1_TARGET) : $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST)
$$(call LINKING_EXE_MSG,$$($1_BASENAME))
$$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(EXE_OUT_OPTION)$$($1_TARGET) \
diff --git a/common/makefiles/javadoc/CORE_PKGS.gmk b/common/makefiles/javadoc/CORE_PKGS.gmk
index 43c7a274818..aede99422ff 100644
--- a/common/makefiles/javadoc/CORE_PKGS.gmk
+++ b/common/makefiles/javadoc/CORE_PKGS.gmk
@@ -142,6 +142,7 @@ CORE_PKGS = \
java.util.prefs \
java.util.regex \
java.util.spi \
+ java.util.stream \
java.util.zip \
javax.accessibility \
javax.activation \
diff --git a/common/makefiles/javadoc/Javadoc.gmk b/common/makefiles/javadoc/Javadoc.gmk
index f944a7e0f46..9fe25223fb8 100644
--- a/common/makefiles/javadoc/Javadoc.gmk
+++ b/common/makefiles/javadoc/Javadoc.gmk
@@ -390,6 +390,17 @@ $(COREAPI_OPTIONS_FILE): $(COREAPI_OVERVIEW)
$(call OptionPair,-tag,specdefault:X) ; \
$(call OptionPair,-tag,Note:X) ; \
$(call OptionPair,-tag,ToDo:X) ; \
+ $(call OptionPair,-tag,apiNote:a:API Note:) ; \
+ $(call OptionPair,-tag,implSpec:a:Implementation Requirements:) ; \
+ $(call OptionPair,-tag,implNote:a:Implementation Note:) ; \
+ $(call OptionPair,-tag,param) ; \
+ $(call OptionPair,-tag,return) ; \
+ $(call OptionPair,-tag,throws) ; \
+ $(call OptionPair,-tag,since) ; \
+ $(call OptionPair,-tag,version) ; \
+ $(call OptionPair,-tag,serialData) ; \
+ $(call OptionPair,-tag,factory) ; \
+ $(call OptionPair,-tag,see) ; \
$(call OptionPair,-tag,$(TAG_JLS)) ; \
$(call OptionOnly,-splitIndex) ; \
$(call OptionPair,-overview,$(COREAPI_OVERVIEW)) ; \
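The added OptionPair lines become -tag options in the javadoc options file: the apiNote/implSpec/implNote entries define custom block tags ("name:placement:header"), while re-listing standard tags such as param and return fixes the order in which javadoc emits them. A hedged sketch of the resulting invocation (file names and quoting are illustrative, not taken from the patch):

  # javadoc is driven by the generated options and package-list files:
  #   javadoc @coredocs.options @coredocs.packages
  # where the options file now also carries, among others:
  #   -tag 'apiNote:a:API Note:'
  #   -tag 'implSpec:a:Implementation Requirements:'
  #   -tag 'implNote:a:Implementation Note:'
  #   -tag param -tag return -tag throws -tag since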
diff --git a/common/nb_native/nbproject/configurations.xml b/common/nb_native/nbproject/configurations.xml
new file mode 100644
index 00000000000..b976a43ff1e
--- /dev/null
+++ b/common/nb_native/nbproject/configurations.xml
@@ -0,0 +1,41671 @@
+
+
-The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method
+The compiler replay is a function to repeat the compiling process from a crashed java process in compiled method
This function only exists in debug version of VM
-First, use SA to attach to the core file, if suceeded, do - clhsdb>dumpreplaydata | -a |[> replay.txt] + +First, use SA to attach to the core file, if succeeded, do + hsdb> dumpreplaydata <address> | -a | <thread_id> [> replay.txt] create file replay.txt, address is address of Method, or nmethod(CodeBlob) - clhsdb>buildreplayjars [all | boot | app] + hsdb> buildreplayjars [all | boot | app] create files: all: app.jar, boot.jar @@ -26,16 +26,16 @@ First, use SA to attach to the core file, if suceeded, do app.jar exit SA now. Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java - java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=--XX:+ReplayCompiles .... + java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles .... This will replay the compiling process. With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app. notes: 1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes. - 2) If encounter error as " " not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/ /proc/ + 2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform> Use this tool to dump VM type library: - vmstructsdump libjvm.so > .db + vmstructsdump libjvm.so > <type_name>.db - set env SA_TYPEDB= .db (refer different shell for set envs) + set env SA_TYPEDB=<type_name>.db (refer different shell for set envs) diff --git a/hotspot/agent/doc/clhsdb.html b/hotspot/agent/doc/clhsdb.html index f1c43fd1e5f..ad43bf7ee57 100644 --- a/hotspot/agent/doc/clhsdb.html +++ b/hotspot/agent/doc/clhsdb.html @@ -15,7 +15,7 @@ GUI tools. Command line HSDB (CLHSDB) tool is alternative to SA GUI tool HSDB. There is also JavaScript based SA command line interface called jsdb. But, CLHSDB supports Unix shell-like (or dbx/gdb-like) command line interface with -support for output redirection/appending (familiar >, >>), command history and so on. +support for output redirection/appending (familiar >, >>), command history and so on. Each CLHSDB command can have zero or more arguments and optionally end with output redirection (or append) to a file. Commands may be stored in a file and run using source command. help command prints usage message for all supported commands (or a specific command) @@ -49,7 +49,7 @@ Available commands: dumpheap [ file ] dump heap in hprof binary format dumpideal -a | id dump ideal graph like debug flag -XX:+PrintIdeal dumpilt -a | id dump inline tree for C2 compilation - dumpreplaydata
| -a |[>replay.txt] dump replay data into a file + dumpreplaydata <address> | -a | <thread_id> [>replay.txt] dump replay data into a file echo [ true | false ] turn on/off command echo mode examine [ address/count ] | [ address,address] show contents of memory from given address field [ type [ name fieldtype isStatic offset address ] ] print info about a field of HotSpot type @@ -96,11 +96,11 @@ Available commands: JavaScript integration
-Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set +
Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set by implementing more commands in a JavaScript file and by loading it by jsload command. jseval command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript function may be exposed as a CLHSDB command by registering it using JavaScript
@@ -127,11 +127,11 @@ hsdb> jsload test.jsregisterCommand
-function. This function accepts command name, usage and name of the JavaScript implementation function +function. This function accepts command name, usage and name of the JavaScript implementation function as arguments.C2 Compilation Replay
+Compilation Replay
When a java process crashes in compiled method, usually a core file is saved. -The C2 replay function can reproduce the compiling process in the core. -c2replay.html +The replay function can reproduce the compiling process in the core. +cireplay.html diff --git a/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m b/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m index d5c23a012df..1234645c257 100644 --- a/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m +++ b/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m @@ -204,7 +204,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_lookupByName0( jstring objectName, jstring symbolName) { struct ps_prochandle* ph = get_proc_handle(env, this_obj); - if (ph->core != NULL) { + if (ph != NULL && ph->core != NULL) { return lookupByNameIncore(env, ph, this_obj, objectName, symbolName); } @@ -238,10 +238,13 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_loo const char* sym = NULL; struct ps_prochandle* ph = get_proc_handle(env, this_obj); - sym = symbol_for_pc(ph, (uintptr_t) addr, &offset); - if (sym == NULL) return 0; - return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID, + if (ph != NULL && ph->core != NULL) { + sym = symbol_for_pc(ph, (uintptr_t) addr, &offset); + if (sym == NULL) return 0; + return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID, (*env)->NewStringUTF(env, sym), (jlong)offset); + } + return 0; } /** called from Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0 */ @@ -279,7 +282,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0( jbyteArray array; struct ps_prochandle* ph = get_proc_handle(env, this_obj); - if (ph->core != NULL) { + if (ph != NULL && ph->core != NULL) { return readBytesFromCore(env, ph, this_obj, addr, numBytes); } @@ -394,9 +397,9 @@ bool fill_java_threads(JNIEnv* env, jobject this_obj, struct ps_prochandle* ph) /* For core file only, called from * Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0 */ -jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id) { +jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id, struct ps_prochandle* ph) { if (!_threads_filled) { - if (!fill_java_threads(env, this_obj, get_proc_handle(env, this_obj))) { + if (!fill_java_threads(env, this_obj, ph)) { throw_new_debugger_exception(env, "Failed to fill in threads"); return 0; } else { @@ -409,7 +412,6 @@ jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, lo jlongArray array; jlong *regs; - struct ps_prochandle* ph = get_proc_handle(env, this_obj); if (get_lwp_regs(ph, lwp_id, &gregs) != true) { THROW_NEW_DEBUGGER_EXCEPTION_("get_thread_regs failed for a lwp", 0); } @@ -521,8 +523,8 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0( print_debug("getThreadRegisterSet0 called\n"); struct ps_prochandle* ph = get_proc_handle(env, this_obj); - if (ph->core != NULL) { - return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id); + if (ph != NULL && ph->core != NULL) { + return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id, ph); } kern_return_t result; @@ -705,8 +707,8 @@ JNF_COCOA_ENTER(env); task_t gTask = 0; result = task_for_pid(mach_task_self(), jpid, &gTask); if (result != KERN_SUCCESS) { - print_error("attach: task_for_pid(%d) failed (%d)\n", (int)jpid, result); - THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process"); + print_error("attach: 
task_for_pid(%d) failed: '%s' (%d)\n", (int)jpid, mach_error_string(result), result); + THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process. Could be caused by an incorrect pid or lack of privileges."); } putTask(env, this_obj, gTask); diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java index 871ee414a44..c1e6d503196 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java @@ -93,10 +93,11 @@ public class ciEnv extends VMObject { CompileTask task = task(); Method method = task.method(); int entryBci = task.osrBci(); + int compLevel = task.compLevel(); Klass holder = method.getMethodHolder(); out.println("compile " + holder.getName().asString() + " " + OopUtilities.escapeString(method.getName().asString()) + " " + method.getSignature().asString() + " " + - entryBci); + entryBci + " " + compLevel); } } diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java index b0b370cef3c..243a5397a0d 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java @@ -78,6 +78,8 @@ public class NMethod extends CodeBlob { current sweep traversal index. */ private static CIntegerField stackTraversalMarkField; + private static CIntegerField compLevelField; + static { VM.registerVMInitializedObserver(new Observer() { public void update(Observable o, Object data) { @@ -113,7 +115,7 @@ public class NMethod extends CodeBlob { osrEntryPointField = type.getAddressField("_osr_entry_point"); lockCountField = type.getJIntField("_lock_count"); stackTraversalMarkField = type.getCIntegerField("_stack_traversal_mark"); - + compLevelField = type.getCIntegerField("_comp_level"); pcDescSize = db.lookupType("PcDesc").getSize(); } @@ -530,7 +532,7 @@ public class NMethod extends CodeBlob { out.println("compile " + holder.getName().asString() + " " + OopUtilities.escapeString(method.getName().asString()) + " " + method.getSignature().asString() + " " + - getEntryBCI()); + getEntryBCI() + " " + getCompLevel()); } @@ -551,4 +553,5 @@ public class NMethod extends CodeBlob { private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); } private int getNulChkTableOffset() { return (int) nulChkTableOffsetField .getValue(addr); } private int getNMethodEndOffset() { return (int) nmethodEndOffsetField .getValue(addr); } + private int getCompLevel() { return (int) compLevelField .getValue(addr); } } diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java index 45ef0a2d27a..0a4273f3ef8 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java @@ -46,10 +46,12 @@ public class CompileTask extends VMObject { Type type = db.lookupType("CompileTask"); methodField = type.getAddressField("_method"); osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0); + compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0); } private static AddressField methodField; private static CIntField osrBciField; + private static CIntField compLevelField; public CompileTask(Address addr) { super(addr); @@ -63,4 +65,8 @@ public class CompileTask 
extends VMObject { public int osrBci() { return (int)osrBciField.getValue(getAddress()); } + + public int compLevel() { + return (int)compLevelField.getValue(getAddress()); + } } diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java index 31dc39c5431..c61d58dc429 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,19 +49,13 @@ public class Method extends Metadata { Type type = db.lookupType("Method"); constMethod = type.getAddressField("_constMethod"); methodData = type.getAddressField("_method_data"); + methodCounters = type.getAddressField("_method_counters"); methodSize = new CIntField(type.getCIntegerField("_method_size"), 0); accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0); code = type.getAddressField("_code"); vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0); - if (!VM.getVM().isCore()) { - invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); - backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); - } bytecodeOffset = type.getSize(); - interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); - interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); - /* interpreterEntry = type.getAddressField("_interpreter_entry"); fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point"); @@ -80,18 +74,14 @@ public class Method extends Metadata { // Fields private static AddressField constMethod; private static AddressField methodData; + private static AddressField methodCounters; private static CIntField methodSize; private static CIntField accessFlags; private static CIntField vtableIndex; - private static CIntField invocationCounter; - private static CIntField backedgeCounter; private static long bytecodeOffset; private static AddressField code; - private static CIntField interpreterThrowoutCountField; - private static CIntField interpreterInvocationCountField; - // constant method names -
, // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable private static Symbol objectInitializerName; @@ -127,6 +117,10 @@ public class Method extends Metadata { Address addr = methodData.getValue(getAddress()); return (MethodData) VMObjectFactory.newObject(MethodData.class, addr); } + public MethodCounters getMethodCounters() { + Address addr = methodCounters.getValue(getAddress()); + return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr); + } /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */ public long getMethodSize() { return methodSize.getValue(this); } public long getMaxStack() { return getConstMethod().getMaxStack(); } @@ -139,16 +133,10 @@ public class Method extends Metadata { public long getCodeSize() { return getConstMethod().getCodeSize(); } public long getVtableIndex() { return vtableIndex.getValue(this); } public long getInvocationCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return invocationCounter.getValue(this); + return getMethodCounters().getInvocationCounter(); } public long getBackedgeCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return backedgeCounter.getValue(this); + return getMethodCounters().getBackedgeCounter(); } // get associated compiled native method, if available, else return null. @@ -369,10 +357,10 @@ public class Method extends Metadata { } public int interpreterThrowoutCount() { - return (int) interpreterThrowoutCountField.getValue(this); + return getMethodCounters().interpreterThrowoutCount(); } public int interpreterInvocationCount() { - return (int) interpreterInvocationCountField.getValue(this); + return getMethodCounters().interpreterInvocationCount(); } } diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java new file mode 100644 index 00000000000..854aa818c89 --- /dev/null +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.oops; + +import java.io.*; +import java.util.*; +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.types.*; +import sun.jvm.hotspot.utilities.*; + +public class MethodCounters extends Metadata { + public MethodCounters(Address addr) { + super(addr); + } + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + Type type = db.lookupType("MethodCounters"); + + interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); + interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); + if (!VM.getVM().isCore()) { + invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); + backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); + } + } + + private static CIntField interpreterInvocationCountField; + private static CIntField interpreterThrowoutCountField; + private static CIntField invocationCounter; + private static CIntField backedgeCounter; + + public int interpreterInvocationCount() { + return (int) interpreterInvocationCountField.getValue(this); + } + + public int interpreterThrowoutCount() { + return (int) interpreterThrowoutCountField.getValue(this); + } + public long getInvocationCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return invocationCounter.getValue(this); + } + public long getBackedgeCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return backedgeCounter.getValue(this); + } + + public void printValueOn(PrintStream tty) { + } +} + diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java index 675edeba0d3..d9ea364edaa 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java @@ -117,8 +117,6 @@ public class JMap extends Tool { mode = MODE_HEAP_SUMMARY; } else if (modeFlag.equals("-histo")) { mode = MODE_HISTOGRAM; - } else if (modeFlag.equals("-permstat")) { - mode = MODE_CLSTATS; } else if (modeFlag.equals("-clstats")) { mode = MODE_CLSTATS; } else if (modeFlag.equals("-finalizerinfo")) { diff --git a/hotspot/make/Makefile b/hotspot/make/Makefile index 8dad67f074e..e0d9826e468 100644 --- a/hotspot/make/Makefile +++ b/hotspot/make/Makefile @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Top level gnumake file for hotspot builds @@ -85,15 +85,15 @@ else endif # Typical C1/C2 targets made available with this Makefile -C1_VM_TARGETS=product1 fastdebug1 optimized1 jvmg1 -C2_VM_TARGETS=product fastdebug optimized jvmg -ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero -SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark -MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 jvmgminimal1 +C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1 +C2_VM_TARGETS=product fastdebug optimized debug +ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero +SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark +MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1 COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug -COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 docs export_debug +COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug # JDK directory list JDK_DIRS=bin include jre lib demo @@ -103,13 +103,13 @@ all: all_product all_fastdebug ifeq ($(JVM_VARIANT_MINIMAL1),true) all_product: productminimal1 all_fastdebug: fastdebugminimal1 -all_debug: jvmgminimal1 +all_debug: debugminimal1 endif ifdef BUILD_CLIENT_ONLY all_product: product1 docs export_product all_fastdebug: fastdebug1 docs export_fastdebug -all_debug: jvmg1 docs export_debug +all_debug: debug1 docs export_debug else ifeq ($(MACOSX_UNIVERSAL),true) all_product: universal_product @@ -127,13 +127,13 @@ all_optimized: optimized optimized1 docs export_optimized allzero: all_productzero all_fastdebugzero all_productzero: productzero docs export_product all_fastdebugzero: fastdebugzero docs export_fastdebug -all_debugzero: jvmgzero docs export_debug +all_debugzero: debugzero docs export_debug all_optimizedzero: optimizedzero docs export_optimized allshark: all_productshark all_fastdebugshark all_productshark: productshark docs export_product all_fastdebugshark: fastdebugshark docs export_fastdebug -all_debugshark: jvmgshark docs export_debug +all_debugshark: debugshark docs export_debug all_optimizedshark: optimizedshark docs export_optimized # Do everything @@ -227,7 +227,7 @@ generic_buildshark: $(MKDIR) -p $(OUTPUTDIR) $(CD) $(OUTPUTDIR); \ $(MAKE) -f $(ABS_OS_MAKEFILE) \ - $(MAKE_ARGS) $(VM_TARGET) + $(MAKE_ARGS) $(VM_TARGET) generic_buildminimal1: ifeq ($(JVM_VARIANT_MINIMAL1),true) @@ -260,7 +260,7 @@ export_fastdebug: EXPORT_SUBDIR=/$(@:export_%=%) \ generic_export export_debug: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=${VM_DEBUG} \ + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ EXPORT_SUBDIR=/$(@:export_%=%) \ generic_export export_optimized: @@ -281,192 +281,197 @@ export_fastdebug_jdk:: ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ generic_export export_debug_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=${VM_DEBUG} \ + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=$(@:export_%_jdk=%) \ ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ generic_export # Export file copy rules XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt -DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs -C1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1 -C2_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2 -ZERO_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_zero -SHARK_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_shark -C1_DIR=$(C1_BASE_DIR)/$(VM_SUBDIR) -C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR) -ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR) -SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR) 
-MINIMAL1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1 -MINIMAL1_DIR=$(MINIMAL1_BASE_DIR)/$(VM_SUBDIR) +DOCS_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_docs +C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1/$(VM_SUBDIR) +C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2/$(VM_SUBDIR) +MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1/$(VM_SUBDIR) +ZERO_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_zero/$(VM_SUBDIR) +SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark/$(VM_SUBDIR) +# Server (C2) ifeq ($(JVM_VARIANT_SERVER), true) - MISC_DIR=$(C2_DIR) - GEN_DIR=$(C2_BASE_DIR)/generated +# Common +$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C2_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C2_DIR)/../generated/jvmtifiles/% + $(install-file) +# Windows +$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll + $(install-file) +$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb + $(install-file) +$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map + $(install-file) +$(EXPORT_LIB_DIR)/%.lib: $(C2_DIR)/%.lib + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_DIR)/%.dll + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_DIR)/%.pdb + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_DIR)/%.map + $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz + $(install-file) endif + +# Client (C1) ifeq ($(JVM_VARIANT_CLIENT), true) - MISC_DIR=$(C1_DIR) - GEN_DIR=$(C1_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZEROSHARK), true) - MISC_DIR=$(SHARK_DIR) - GEN_DIR=$(SHARK_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZERO), true) - MISC_DIR=$(ZERO_DIR) - GEN_DIR=$(ZERO_BASE_DIR)/generated +# Common +$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C1_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C1_DIR)/../generated/jvmtifiles/% + $(install-file) +# Windows +$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll + $(install-file) +$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb + $(install-file) +$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map + $(install-file) +$(EXPORT_LIB_DIR)/%.lib: $(C1_DIR)/%.lib + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_DIR)/%.dll + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_DIR)/%.pdb + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_DIR)/%.map + $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) 
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz + $(install-file) endif + +# Minimal1 ifeq ($(JVM_VARIANT_MINIMAL1), true) - MISC_DIR=$(MINIMAL1_DIR) - GEN_DIR=$(MINIMAL1_BASE_DIR)/generated -endif - -# Bin files (windows) -ifeq ($(OSNAME),windows) - -# Get jvm.lib -$(EXPORT_LIB_DIR)/%.lib: $(MISC_DIR)/%.lib +# Common +$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) - -# Other libraries (like SA) -$(EXPORT_JRE_BIN_DIR)/%.diz: $(MISC_DIR)/%.diz +$(EXPORT_LIB_DIR)/%.jar: $(MINIMAL1_DIR)/../generated/%.jar $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.dll: $(MISC_DIR)/%.dll +$(EXPORT_INCLUDE_DIR)/%: $(MINIMAL1_DIR)/../generated/jvmtifiles/% $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MISC_DIR)/%.pdb +# Windows +$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.map: $(MISC_DIR)/%.map +$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb $(install-file) - -# Client files always come from C1 area -$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz +$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map $(install-file) -$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll +$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_DIR)/%.lib $(install-file) -$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb +$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) -$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map +$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll $(install-file) - -# Server files always come from C2 area -$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb $(install-file) -$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll +$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_DIR)/%.map $(install-file) -$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) -$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map +$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) endif -# Minimal JVM files always come from minimal area -$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz +# Zero +ifeq ($(JVM_VARIANT_ZERO), true) +# Common +$(EXPORT_LIB_DIR)/%.jar: $(ZERO_DIR)/../generated/%.jar $(install-file) -$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll +$(EXPORT_INCLUDE_DIR)/%: $(ZERO_DIR)/../generated/jvmtifiles/% $(install-file) -$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) -$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz $(install-file) - -# Shared Library -ifneq ($(OSNAME),windows) 
- ifeq ($(JVM_VARIANT_SERVER), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_CLIENT), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZEROSHARK), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZERO), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_MINIMAL1), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - endif endif -# Jar file (sa-jdi.jar) -$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar +# Shark +ifeq 
($(JVM_VARIANT_ZEROSHARK), true) +# Common +$(EXPORT_LIB_DIR)/%.jar: $(SHARK_DIR)/../generated/%.jar $(install-file) - -# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h) -$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/% +$(EXPORT_INCLUDE_DIR)/%: $(SHARK_DIR)/../generated/jvmtifiles/% $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) +endif $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/% $(install-file) @@ -541,11 +546,11 @@ generic_test: @$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -showversion -help # C2 test targets -test_product test_optimized test_fastdebug test_jvmg: +test_product test_optimized test_fastdebug test_debug: @$(MAKE) generic_test ALTJVM_DIR="$(C2_DIR)/$(@:test_%=%)" # C1 test targets -test_product1 test_optimized1 test_fastdebug1 test_jvmg1: +test_product1 test_optimized1 test_fastdebug1 test_debug1: ifeq ($(ARCH_DATA_MODEL), 32) @$(MAKE) generic_test ALTJVM_DIR="$(C1_DIR)/$(@:test_%1=%)" else @@ -553,15 +558,15 @@ test_product1 test_optimized1 test_fastdebug1 test_jvmg1: endif # Zero test targets -test_productzero test_optimizedzero test_fastdebugzero test_jvmgzero: +test_productzero test_optimizedzero test_fastdebugzero test_debugzero: @$(MAKE) generic_test ALTJVM_DIR="$(ZERO_DIR)/$(@:test_%zero=%)" # Shark test targets -test_productshark test_optimizedshark test_fastdebugshark test_jvmgshark: +test_productshark test_optimizedshark test_fastdebugshark test_debugshark: @$(MAKE) generic_test ALTJVM_DIR="$(SHARK_DIR)/$(@:test_%shark=%)" # Minimal1 test targets -test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_jvmgminimal1: +test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_debugminimal1: @$(MAKE) generic_test ALTJVM_DIR="$(MINIMAL1_DIR)/$(@:test_%minimal1=%)" @@ -626,7 +631,7 @@ help: intro_help target_help variable_help notes_help examples_help # Intro help message intro_help: @$(ECHO) \ -"Makefile for the Hotspot workspace." +"Makefile for the Hotspot workspace." @$(ECHO) \ "Default behavior is to build and create an export area for the j2se builds." 
@@ -637,7 +642,7 @@ target_help: @$(ECHO) "world: Same as: all create_jdk" @$(ECHO) "all_product: Same as: product product1 export_product" @$(ECHO) "all_fastdebug: Same as: fastdebug fastdebug1 export_fastdebug" - @$(ECHO) "all_debug: Same as: jvmg jvmg1 export_debug" + @$(ECHO) "all_debug: Same as: debug debug1 export_debug" @$(ECHO) "all_optimized: Same as: optimized optimized1 export_optimized" @$(ECHO) "clean: Clean all areas" @$(ECHO) "export_product: Export product files to EXPORT_PATH" @@ -730,7 +735,7 @@ examples_help: @$(ECHO) \ " $(MAKE) world" @$(ECHO) \ -" $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)" +" $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)" @$(ECHO) \ " $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)" @@ -741,6 +746,23 @@ include $(GAMMADIR)/make/$(OSNAME)/makefiles/universal.gmk endif endif +# Compatibility for transition to new naming +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgminimal1: warn_jvmg_deprecated debugminimal1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark + # JPRT rule to build this workspace include $(GAMMADIR)/make/jprt.gmk diff --git a/hotspot/make/bsd/Makefile b/hotspot/make/bsd/Makefile index 024aef9cba6..d9058c24e25 100644 --- a/hotspot/make/bsd/Makefile +++ b/hotspot/make/bsd/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -142,55 +142,43 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH)) # # debug compiler2 _ _compiler2/debug # fastdebug compiler2 _ _compiler2/fastdebug -# jvmg compiler2 _ _compiler2/jvmg # optimized compiler2 _ _compiler2/optimized -# profiled compiler2 _ _compiler2/profiled # product compiler2 _ _compiler2/product # # debug1 compiler1 _ _compiler1/debug # fastdebug1 compiler1 _ _compiler1/fastdebug -# jvmg1 compiler1 _ _compiler1/jvmg # optimized1 compiler1 _ _compiler1/optimized -# profiled1 compiler1 _ _compiler1/profiled # product1 compiler1 _ _compiler1/product # # debugcore core _ _core/debug # fastdebugcore core _ _core/fastdebug -# jvmgcore core _ _core/jvmg # optimizedcore core _ _core/optimized -# profiledcore core _ _core/profiled # productcore core _ _core/product # # debugzero zero _ _zero/debug # fastdebugzero zero _ _zero/fastdebug -# jvmgzero zero _ _zero/jvmg # optimizedzero zero _ _zero/optimized -# profiledzero zero _ _zero/profiled # productzero zero _ _zero/product # # debugshark shark _ _shark/debug # fastdebugshark shark _ _shark/fastdebug -# jvmgshark shark _ _shark/jvmg # optimizedshark shark _ _shark/optimized -# profiledshark shark _ _shark/profiled # productshark shark _ _shark/product # # fastdebugminimal1 minimal1 _ _minimal1/fastdebug -# jvmgminimal1 minimal1 _ _minimal1/jvmg +# debugminimal1 minimal1 _ _minimal1/debug # productminimal1 minimal1 _ _minimal1/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # 
optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) SUBDIR_DOCS = $(OSNAME)_$(VARIANTARCH)_docs @@ -354,15 +342,29 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs # Synonyms for win32-like targets. -compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore -zero: jvmgzero productzero +zero: debugzero productzero -shark: jvmgshark productshark +shark: debugshark productshark + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/bsd/makefiles/buildtree.make b/hotspot/make/bsd/makefiles/buildtree.make index 752e0febb76..9d0a2174c54 100644 --- a/hotspot/make/bsd/makefiles/buildtree.make +++ b/hotspot/make/bsd/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -122,7 +122,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/jvmtifiles \ $(PLATFORM_DIR)/generated/dtracefiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -186,8 +186,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. 
@@ -279,8 +279,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -381,7 +379,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/bsd/makefiles/debug.make b/hotspot/make/bsd/makefiles/debug.make index 5849ed67eed..c14d974fbfb 100644 --- a/hotspot/make/bsd/makefiles/debug.make +++ b/hotspot/make/bsd/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -27,17 +27,16 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug -_JUNK_ := $(shell echo -e >&2 ""\ - "----------------------------------------------------------------------\n" \ - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \ - "Please use 'make jvmg' to build debug JVM. \n" \ - "----------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/defs.make b/hotspot/make/bsd/makefiles/defs.make index 4c56c973892..14d3d4aa2ca 100644 --- a/hotspot/make/bsd/makefiles/defs.make +++ b/hotspot/make/bsd/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot bsd builds. 
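With the profiled flavor gone, the generated flags_vm.make no longer needs the special case that included optimized.make ahead of profiled.make; it now simply includes the makefile named after the current TARGET. The generation idiom itself is a parenthesized shell group redirected into the target file, roughly like this (the comment text and variable values are placeholders):

# Sketch of the flags_vm.make generation rule after the change;
# GAMMADIR, OS_FAMILY and TARGET stand in for the real configure-time values.
flags_vm.make:
	( echo "# Generated file -- do not edit"; \
	  echo; \
	  echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
	) > $@

The "\$$" escaping leaves a literal $(GAMMADIR) reference in the generated file, which is only expanded when that file is later read by make; OS_FAMILY and TARGET are expanded at generation time.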
@@ -86,7 +86,7 @@ ifneq (,$(findstring $(ARCH), amd64 x86_64)) VM_PLATFORM = bsd_i486 HS_ARCH = x86 # We have to reset ARCH to i386 since SRCARCH relies on it - ARCH = i386 + ARCH = i386 endif endif @@ -146,9 +146,6 @@ else LIBRARY_SUFFIX=so endif -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.so @@ -177,7 +174,7 @@ ifeq ($(JVM_VARIANT_MINIMAL1),true) else EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo endif - endif + endif endif # Serviceability Binaries diff --git a/hotspot/make/bsd/makefiles/fastdebug.make b/hotspot/make/bsd/makefiles/fastdebug.make index e175622ee10..d7913934083 100644 --- a/hotspot/make/bsd/makefiles/fastdebug.make +++ b/hotspot/make/bsd/makefiles/fastdebug.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/jvmg.make b/hotspot/make/bsd/makefiles/jvmg.make deleted file mode 100644 index 52dbdb94b98..00000000000 --- a/hotspot/make/bsd/makefiles/jvmg.make +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. - -# Linker mapfile -MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/profiled.make b/hotspot/make/bsd/makefiles/profiled.make deleted file mode 100644 index fa5c9153b23..00000000000 --- a/hotspot/make/bsd/makefiles/profiled.make +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. 
All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making profiled version of Gamma VM -# (It is also optimized.) - -CFLAGS += -pg -AOUT_FLAGS += -pg -LDNOMAP = true diff --git a/hotspot/make/bsd/makefiles/vm.make b/hotspot/make/bsd/makefiles/vm.make index e93765dc50f..b9528e101ee 100644 --- a/hotspot/make/bsd/makefiles/vm.make +++ b/hotspot/make/bsd/makefiles/vm.make @@ -187,7 +187,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version index 865a90932d1..df4d53fe621 100644 --- a/hotspot/make/hotspot_version +++ b/hotspot/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013 HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=28 +HS_BUILD_NUMBER=31 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff --git a/hotspot/make/jprt.properties b/hotspot/make/jprt.properties index 8bf107758c9..af7f321b722 100644 --- a/hotspot/make/jprt.properties +++ b/hotspot/make/jprt.properties @@ -133,15 +133,15 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}} # Standard list of jprt build targets for this source tree jprt.build.targets.standard= \ - ${jprt.my.solaris.sparc}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.i586}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \ - ${jprt.my.linux.i586}-{product|fastdebug|debug}, \ + ${jprt.my.solaris.sparc}-{product|fastdebug}, \ + ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \ + ${jprt.my.solaris.i586}-{product|fastdebug}, \ + ${jprt.my.solaris.x64}-{product|fastdebug}, \ + ${jprt.my.linux.i586}-{product|fastdebug}, \ ${jprt.my.linux.x64}-{product|fastdebug}, \ - ${jprt.my.macosx.x64}-{product|fastdebug|debug}, \ - ${jprt.my.windows.i586}-{product|fastdebug|debug}, \ - ${jprt.my.windows.x64}-{product|fastdebug|debug}, \ + ${jprt.my.macosx.x64}-{product|fastdebug}, \ + ${jprt.my.windows.i586}-{product|fastdebug}, \ + ${jprt.my.windows.x64}-{product|fastdebug}, \ ${jprt.my.linux.armvh}-{product|fastdebug} jprt.build.targets.open= \ @@ -150,7 +150,7 @@ jprt.build.targets.open= \ ${jprt.my.linux.x64}-{productOpen} jprt.build.targets.embedded= \ - ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \ + 
${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \ @@ -174,21 +174,18 @@ jprt.my.solaris.sparc.test.targets= \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \ ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \ ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \ @@ -201,21 +198,18 @@ jprt.my.solaris.sparcv9.test.targets= \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \ ${jprt.my.solaris.sparcv9}-product-c2-runThese, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \ @@ -229,21 +223,18 @@ jprt.my.solaris.x64.test.targets= \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \ ${jprt.my.solaris.x64}-product-c2-runThese, \ ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ 
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ @@ -258,28 +249,24 @@ jprt.my.solaris.i586.test.targets= \ ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \ ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \ ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_default, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_default, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_default, \ ${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \ ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \ @@ -293,21 +280,19 @@ jprt.my.linux.i586.test.targets = \ ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \ ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \ ${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_default, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \ 
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \ + ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \ @@ -318,21 +303,18 @@ jprt.my.linux.x64.test.targets = \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \ @@ -342,21 +324,18 @@ jprt.my.macosx.x64.test.targets = \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \ @@ -369,14 +348,12 @@ jprt.my.windows.i586.test.targets = \ ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \ ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \ ${jprt.my.windows.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ 
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_default, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \ @@ -396,14 +373,12 @@ jprt.my.windows.x64.test.targets = \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \ ${jprt.my.windows.x64}-product-c2-runThese, \ ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ @@ -419,7 +394,7 @@ jprt.my.windows.x64.test.targets = \ # Some basic "smoke" tests for OpenJDK builds jprt.test.targets.open = \ - ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98, \ + ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \ ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \ ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98 @@ -520,5 +495,5 @@ jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7} jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}} # 7155453: Work-around to prevent popups on OSX from blocking test completion -# but the work-around is added to all platforms to be consistent +# but the work-around is added to all platforms to be consistent jprt.jbb.options=-Djava.awt.headless=true diff --git a/hotspot/make/linux/Makefile b/hotspot/make/linux/Makefile index 04d0b8e5c64..0820c254612 100644 --- a/hotspot/make/linux/Makefile +++ b/hotspot/make/linux/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -142,55 +142,42 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH)) # # debug compiler2 _ _compiler2/debug # fastdebug compiler2 _ _compiler2/fastdebug -# jvmg compiler2 _ _compiler2/jvmg # optimized compiler2 _ _compiler2/optimized -# profiled compiler2 _ _compiler2/profiled # product compiler2 _ _compiler2/product # # debug1 compiler1 _ _compiler1/debug # fastdebug1 compiler1 _ _compiler1/fastdebug -# jvmg1 compiler1 _ _compiler1/jvmg # optimized1 compiler1 _ _compiler1/optimized -# profiled1 compiler1 _ _compiler1/profiled # product1 compiler1 _ _compiler1/product # # debugcore core _ _core/debug # fastdebugcore core _ _core/fastdebug -# jvmgcore core _ _core/jvmg # optimizedcore core _ _core/optimized -# profiledcore core _ _core/profiled # productcore core _ _core/product # # debugzero zero _ _zero/debug # fastdebugzero zero _ _zero/fastdebug -# jvmgzero zero _ _zero/jvmg # optimizedzero zero _ _zero/optimized -# profiledzero zero _ _zero/profiled # productzero zero _ _zero/product # # debugshark shark _ _shark/debug # fastdebugshark shark _ _shark/fastdebug -# jvmgshark shark _ _shark/jvmg # optimizedshark shark _ _shark/optimized -# profiledshark shark _ _shark/profiled # productshark shark _ _shark/product # # fastdebugminimal1 minimal1 _ _minimal1/fastdebug -# jvmgminimal1 minimal1 _ _minimal1/jvmg # productminimal1 minimal1 _ _minimal1/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) SUBDIR_DOCS = $(OSNAME)_$(VARIANTARCH)_docs @@ -357,15 +344,29 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs # Synonyms for win32-like targets. -compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore -zero: jvmgzero productzero +zero: debugzero productzero -shark: jvmgshark productshark +shark: debugshark productshark + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/linux/makefiles/buildtree.make b/hotspot/make/linux/makefiles/buildtree.make index f980dcdafe0..0b7c12001b3 100644 --- a/hotspot/make/linux/makefiles/buildtree.make +++ b/hotspot/make/linux/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -117,7 +117,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -179,8 +179,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. @@ -284,8 +284,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -376,7 +374,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/linux/makefiles/debug.make b/hotspot/make/linux/makefiles/debug.make index e51d4c192d2..7c57280a12c 100644 --- a/hotspot/make/linux/makefiles/debug.make +++ b/hotspot/make/linux/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -27,17 +27,16 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. # Linker mapfile MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug -_JUNK_ := $(shell echo -e >&2 ""\ - "----------------------------------------------------------------------\n" \ - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \ - "Please use 'make jvmg' to build debug JVM. 
\n" \ - "----------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/defs.make b/hotspot/make/linux/makefiles/defs.make index 9bdbdf226e6..778830814ac 100644 --- a/hotspot/make/linux/makefiles/defs.make +++ b/hotspot/make/linux/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot linux builds. @@ -92,7 +92,7 @@ ifneq (,$(findstring $(ARCH), amd64 x86_64)) VM_PLATFORM = linux_i486 HS_ARCH = x86 # We have to reset ARCH to i686 since SRCARCH relies on it - ARCH = i686 + ARCH = i686 endif endif @@ -240,9 +240,6 @@ JDK_INCLUDE_SUBDIR=linux # Library suffix LIBRARY_SUFFIX=so -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.so @@ -279,7 +276,7 @@ ifeq ($(JVM_VARIANT_CLIENT),true) else EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo endif - endif + endif endif ifeq ($(JVM_VARIANT_MINIMAL1),true) @@ -292,15 +289,15 @@ ifeq ($(JVM_VARIANT_MINIMAL1),true) else EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo endif - endif + endif endif # Serviceability Binaries # No SA Support for PPC, IA64, ARM or zero ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ - $(EXPORT_LIB_DIR)/sa-jdi.jar + $(EXPORT_LIB_DIR)/sa-jdi.jar ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ - $(EXPORT_LIB_DIR)/sa-jdi.jar + $(EXPORT_LIB_DIR)/sa-jdi.jar ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) ifeq ($(ZIP_DEBUGINFO_FILES),1) ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz @@ -310,10 +307,10 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo endif endif -ADD_SA_BINARIES/ppc = -ADD_SA_BINARIES/ia64 = -ADD_SA_BINARIES/arm = -ADD_SA_BINARIES/zero = +ADD_SA_BINARIES/ppc = +ADD_SA_BINARIES/ia64 = +ADD_SA_BINARIES/arm = +ADD_SA_BINARIES/zero = -include $(HS_ALT_MAKE)/linux/makefiles/defs.make diff --git a/hotspot/make/linux/makefiles/fastdebug.make b/hotspot/make/linux/makefiles/fastdebug.make index 86ffc36b7c2..abefd2b4ca0 100644 --- a/hotspot/make/linux/makefiles/fastdebug.make +++ b/hotspot/make/linux/makefiles/fastdebug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
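The Serviceability Agent exports in linux defs.make follow a pattern used throughout these defs.make files: a debug-info artifact is exported only when ENABLE_FULL_DEBUG_SYMBOLS is on, and ZIP_DEBUGINFO_FILES selects the zipped .diz form instead of the raw .debuginfo file. The shape of that logic, written out readably (only the x86 case is shown; the library name is just the example from the diff):

# Sketch of the debug-info export selection for libsaproc on x86.
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(ZIP_DEBUGINFO_FILES),1)
    ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
  else
    ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
  endif
endif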
-# +# # # Sets make macros for making debug version of VM @@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/jvmg.make b/hotspot/make/linux/makefiles/jvmg.make deleted file mode 100644 index 24047f7c358..00000000000 --- a/hotspot/make/linux/makefiles/jvmg.make +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. 
- -# Linker mapfile -MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/vm.make b/hotspot/make/linux/makefiles/vm.make index b31064782f7..af060f8af0b 100644 --- a/hotspot/make/linux/makefiles/vm.make +++ b/hotspot/make/linux/makefiles/vm.make @@ -189,7 +189,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/solaris/Makefile b/hotspot/make/solaris/Makefile index 7ae82d856f3..7ae2be418d3 100644 --- a/hotspot/make/solaris/Makefile +++ b/hotspot/make/solaris/Makefile @@ -120,37 +120,29 @@ endif # # debug compiler2 _ _compiler2/debug # fastdebug compiler2 _ _compiler2/fastdebug -# jvmg compiler2 _ _compiler2/jvmg # optimized compiler2 _ _compiler2/optimized -# profiled compiler2 _ _compiler2/profiled # product compiler2 _ _compiler2/product # # debug1 compiler1 _ _compiler1/debug # fastdebug1 compiler1 _ _compiler1/fastdebug -# jvmg1 compiler1 _ _compiler1/jvmg # optimized1 compiler1 _ _compiler1/optimized -# profiled1 compiler1 _ _compiler1/profiled # product1 compiler1 _ _compiler1/product # # debugcore core _ _core/debug # fastdebugcore core _ _core/fastdebug -# jvmgcore core _ _core/jvmg # optimizedcore core _ _core/optimized -# profiledcore core _ _core/profiled # productcore core _ _core/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product SUBDIR_DOCS = $(OSNAME)_$(BUILDARCH)_docs SUBDIRS_C1 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS)) @@ -267,11 +259,21 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs # Synonyms for win32-like targets. -compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/solaris/makefiles/buildtree.make b/hotspot/make/solaris/makefiles/buildtree.make index a3ab0b5e52c..989470f293f 100644 --- a/hotspot/make/solaris/makefiles/buildtree.make +++ b/hotspot/make/solaris/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -69,7 +69,7 @@ PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).gcc GCC_LIB = /usr/local/lib else PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH) -GCC_LIB = +GCC_LIB = endif ifdef FORCE_TIERED @@ -110,7 +110,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -153,7 +153,7 @@ ifndef OPENJDK endif endif -BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) BUILDTREE = \ $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS) @@ -172,8 +172,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. @@ -274,8 +274,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -366,7 +364,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/solaris/makefiles/debug.make b/hotspot/make/solaris/makefiles/debug.make index 602c07dc366..3fba8e1239d 100644 --- a/hotspot/make/solaris/makefiles/debug.make +++ b/hotspot/make/solaris/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -37,22 +37,20 @@ ifeq ($(COMPILER_REV_NUMERIC),508) endif endif -CFLAGS += $(DEBUG_CFLAGS/BYFILE) +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. 
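The DEBUG_CFLAGS/BYFILE assignment that these debug.make files keep is a per-file override idiom: if a variable named DEBUG_CFLAGS/<object file> is defined it wins, otherwise the computed name collapses to DEBUG_CFLAGS/DEFAULT and the default flags are used. (The HOTSPARC_GENERIC comment refers to a related trick, visible in the fastdebug.make hunks, where writing CFLAGS$(HOTSPARC_GENERIC) += ... lets an environment variable redirect the append to an unused variable and so disable it.) A stripped-down sketch of the by-file fallback, with illustrative names:

# Sketch of the per-file flag override idiom used by DEBUG_CFLAGS/BYFILE.
FLAGS/DEFAULT := -g
FLAGS/foo.o   := -g -O0          # hypothetical per-file override

# If FLAGS/$@ is non-empty it is used as-is and the second reference names
# an undefined variable; if it is empty, the second reference collapses to
# FLAGS/DEFAULT and supplies the fallback.
FLAGS/BYFILE   = $(FLAGS/$@)$(FLAGS/DEFAULT$(FLAGS/$@))

foo.o bar.o:
	@echo "compiling $@ with: $(FLAGS/BYFILE)"
# foo.o prints "-g -O0"; bar.o, with no override, prints "-g".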
# Linker mapfiles MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug -# This mapfile is only needed when compiling with dtrace support, +# This mapfile is only needed when compiling with dtrace support, # and mustn't be otherwise. MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) -_JUNK_ := $(shell echo >&2 ""\ - "-------------------------------------------------------------------------\n" \ - "WARNING: 'gnumake debug' is deprecated. It will be removed in the future.\n" \ - "Please use 'gnumake jvmg' to build debug JVM. \n" \ - "-------------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/defs.make b/hotspot/make/solaris/makefiles/defs.make index 14d0aced5a1..74ca7f70e2a 100644 --- a/hotspot/make/solaris/makefiles/defs.make +++ b/hotspot/make/solaris/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot solaris builds. @@ -172,9 +172,6 @@ JDK_INCLUDE_SUBDIR=solaris # Library suffix LIBRARY_SUFFIX=so -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.$(LIBRARY_SUFFIX) @@ -221,8 +218,8 @@ ifeq ($(JVM_VARIANT_SERVER),true) endif ifeq ($(JVM_VARIANT_CLIENT),true) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt - EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) - EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX) ifeq ($(ARCH_DATA_MODEL),32) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX) @@ -257,4 +254,4 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo endif endif -EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar +EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar diff --git a/hotspot/make/solaris/makefiles/fastdebug.make b/hotspot/make/solaris/makefiles/fastdebug.make index fdafd773bd3..3719c3edbda 100644 --- a/hotspot/make/solaris/makefiles/fastdebug.make +++ b/hotspot/make/solaris/makefiles/fastdebug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Sets make macros for making debug version of VM @@ -118,10 +118,10 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug -# This mapfile is only needed when compiling with dtrace support, +# This mapfile is only needed when compiling with dtrace support, # and mustn't be otherwise. MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/jvmg.make b/hotspot/make/solaris/makefiles/jvmg.make deleted file mode 100644 index c9102393c8f..00000000000 --- a/hotspot/make/solaris/makefiles/jvmg.make +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -ifeq ("${Platform_compiler}", "sparcWorks") - -ifeq ($(COMPILER_REV_NUMERIC),508) - # SS11 SEGV when compiling with -g and -xarch=v8, using different backend - DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0 - DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0 -endif -endif - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. - -# Linker mapfiles -MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ - $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug - -# This mapfile is only needed when compiling with dtrace support, -# and mustn't be otherwise. -MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/profiled.make b/hotspot/make/solaris/makefiles/profiled.make deleted file mode 100644 index cbbdb03bcdb..00000000000 --- a/hotspot/make/solaris/makefiles/profiled.make +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making profiled version of Gamma VM -# (It is also optimized.) - -CFLAGS += -pg - -# On x86 Solaris 2.6, 7, and 8 if LD_LIBRARY_PATH has /usr/lib in it then -# adlc linked with -pg puts out empty header files. To avoid linking adlc -# with -pg the profile flag is split out separately and used in rules.make - -PROF_AOUT_FLAGS += -pg - -# To do a profiled build of the product, such as for generating the -# reordering file, set PROFILE_PRODUCT. Otherwise the reordering file will -# contain references to functions which are not defined in the PRODUCT build. - -ifdef PROFILE_PRODUCT - SYSDEFS += -DPRODUCT -endif - -LDNOMAP = true diff --git a/hotspot/make/solaris/makefiles/vm.make b/hotspot/make/solaris/makefiles/vm.make index 5aca8f05f76..62146d77f03 100644 --- a/hotspot/make/solaris/makefiles/vm.make +++ b/hotspot/make/solaris/makefiles/vm.make @@ -202,7 +202,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/windows/build.make b/hotspot/make/windows/build.make index e2f50542523..c072a170135 100644 --- a/hotspot/make/windows/build.make +++ b/hotspot/make/windows/build.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -235,18 +235,14 @@ product release optimized: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product ARCH=$(ARCH) -# The debug or jvmg (all the same thing) is an optional build -debug jvmg: checks $(variantDir) $(variantDir)\local.make sanity +# The debug build is an optional build +debug: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=debug ARCH=$(ARCH) fastdebug: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=fastdebug ARCH=$(ARCH) -develop: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH) - # target to create just the directory structure tree: checks $(variantDir) $(variantDir)\local.make sanity mkdir $(variantDir)\product diff --git a/hotspot/make/windows/create_obj_files.sh b/hotspot/make/windows/create_obj_files.sh index 14a7087f2aa..257b3f140d2 100644 --- a/hotspot/make/windows/create_obj_files.sh +++ b/hotspot/make/windows/create_obj_files.sh @@ -114,7 +114,7 @@ case "${TYPE}" in "shark") Src_Dirs="${CORE_PATHS}" ;; esac -COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp chaitin* c2_* runtime_*" +COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*" COMPILER1_SPECIFIC_FILES="c1_*" SHARK_SPECIFIC_FILES="shark" ZERO_SPECIFIC_FILES="zero" diff --git a/hotspot/make/windows/makefiles/defs.make b/hotspot/make/windows/makefiles/defs.make index ca1f327be75..4bee0afa8b0 100644 --- a/hotspot/make/windows/makefiles/defs.make +++ b/hotspot/make/windows/makefiles/defs.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot windows builds. @@ -209,8 +209,6 @@ endif ifneq (,$(findstring MINGW,$(SYSTEM_UNAME))) USING_MINGW=true endif -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=debug # Windows wants particular paths due to nmake (must be after macros defined) # It is important that gnumake invokes nmake with C:\\...\\ formated @@ -292,7 +290,7 @@ ifeq ($(BUILD_WIN_SA), 1) MAKE_ARGS += BUILD_WIN_SA=1 endif -# Propagate compiler and tools paths from configure to nmake. +# Propagate compiler and tools paths from configure to nmake. # Need to make sure they contain \\ and not /. ifneq ($(SPEC),) ifeq ($(USING_CYGWIN), true) diff --git a/hotspot/make/windows/makefiles/vm.make b/hotspot/make/windows/makefiles/vm.make index bd02d42fa4f..9e7c64b8f33 100644 --- a/hotspot/make/windows/makefiles/vm.make +++ b/hotspot/make/windows/makefiles/vm.make @@ -31,11 +31,7 @@ COMMONSRC=$(WorkSpace)\src ALTSRC=$(WorkSpace)\src\closed !ifdef RELEASE -!ifdef DEVELOP -CXX_FLAGS=$(CXX_FLAGS) /D "DEBUG" -!else CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT" -!endif !else CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT" !endif @@ -186,7 +182,7 @@ VM_PATH={$(VM_PATH)} # Special case files not using precompiled header files. 
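In the Windows build.make the flavor names map directly onto a recursive nmake invocation with BUILD_FLAVOR set, so dropping the jvmg alias and the develop target removes entry points without changing the underlying plumbing. The dispatch pattern, sketched in GNU make terms rather than nmake syntax (top.make here is a placeholder for the real sub-makefile):

# Sketch: each flavor target re-invokes make on the real build file,
# passing the flavor being built via BUILD_FLAVOR.
.PHONY: product debug fastdebug
product debug fastdebug:
	$(MAKE) -f top.make BUILD_FLAVOR=$@ ARCH=$(ARCH)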
-c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp +c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp diff --git a/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp b/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp index a93b8c46eca..557cce42c76 100644 --- a/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp +++ b/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp @@ -67,7 +67,7 @@ LINK32=link.exe # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c # ADD BASE RSC /l 0x409 # ADD RSC /l 0x409 BSC32=bscmake.exe diff --git a/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp b/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp index a93b8c46eca..557cce42c76 100644 --- a/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp +++ b/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp @@ -67,7 +67,7 @@ LINK32=link.exe # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." 
/I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c # ADD BASE RSC /l 0x409 # ADD RSC /l 0x409 BSC32=bscmake.exe diff --git a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp index 6723ef2c352..be4ae63e6aa 100644 --- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp @@ -1000,9 +1000,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); assert(deopt_blob != NULL, "deoptimization blob must have been created"); restore_live_registers(sasm); - __ restore(); - __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type); - __ delayed()->nop(); + + AddressLiteral dest(deopt_blob->unpack_with_reexecution()); + __ jump_to(dest, O0); + __ delayed()->restore(); } break; diff --git a/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp b/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp new file mode 100644 index 00000000000..791331168d2 --- /dev/null +++ b/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" +#ifdef COMPILER2 +#include "opto/matcher.hpp" +#endif + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { +#ifdef COMPILER2 + // Stub is fixed up when the corresponding call is converted from calling + // compiled code to calling interpreted code. + // set (empty), G5 + // jmp -1 + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark)); + + __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode())); + + __ set_inst_mark(); + AddressLiteral addrlit(-1); + __ JUMP(addrlit, G3, 0); + + __ delayed()->nop(); + + // Update current stubs pointer and restore code_end. + __ end_a_stub(); +#else + ShouldNotReachHere(); +#endif +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + // This doesn't need to be accurate but it must be larger or equal to + // the real size of the stub. 
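// (Editorial aside, not part of the original patch: to_interp_stub_size()
//  below is sized from the stub emitted by emit_to_interp_stub() above -- a
//  NativeMovConstReg, i.e. the sethi/setlo pair that materializes the Method*
//  into the inline-cache register, plus a NativeJump (sethi; jmp; nop) whose
//  target is patched once the call is resolved, plus padding when TraceJumps
//  is enabled.)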
+ return (NativeMovConstReg::instruction_size + // sethi/setlo; + NativeJump::instruction_size + // sethi; jmp; nop + (TraceJumps ? 20 * BytesPerInstWord : 0) ); +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff --git a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp index b1451fb0bae..8a71d0923ab 100644 --- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -404,14 +404,20 @@ address CppInterpreter::deopt_entry(TosState state, int length) { // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { + Label done; + const Register Rcounters = G3_scratch; + + __ ld_ptr(STATE(_method), G5_method); + __ get_method_counters(G5_method, Rcounters, done); + // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ ld_ptr(STATE(_method), G3_scratch); - Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, 0, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); @@ -420,7 +426,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); __ delayed()->nop(); - + __ bind(done); } address InterpreterGenerator::generate_empty_entry(void) { diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp index 1c368ffe84e..94cef1a9a25 100644 --- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -304,7 +304,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else. // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier - // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the + // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding // that initial frame and retrying. diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp index 6060f003b68..d4f8b9b2341 100644 --- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "oops/markOop.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/methodCounters.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/jvmtiThreadState.hpp" @@ -2086,19 +2087,29 @@ void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { #endif /* CC_INTERP */ -void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::get_method_counters(Register method, + Register Rcounters, + Label& skip) { + Label has_counters; + Address method_counters(method, in_bytes(Method::method_counters_offset())); + ld_ptr(method_counters, Rcounters); + br_notnull_short(Rcounters, Assembler::pt, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + ld_ptr(method_counters, Rcounters); + br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory + delayed()->nop(); + bind(has_counters); +} + +void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address inv_counter(G5_method, Method::invocation_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()); - Address be_counter (G5_method, Method::backedge_counter_offset() + + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset()); -#else - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ int delta = InvocationCounter::count_increment; // Load each counter in a register @@ -2122,19 +2133,15 @@ void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Reg // Note that this macro must leave the backedge_count + invocation_count in Rtmp! 
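// Editorial illustration, not part of the patch: the new get_method_counters()
// helper above lazily allocates the MethodCounters block that the invocation
// and backedge counters now live in, and lets the caller skip the update when
// allocation fails. A minimal self-contained C++ sketch of that pattern
// (types and names here are illustrative, not HotSpot's definitions):

#include <new>

struct MethodCountersSketch { int invocation_counter = 0; int backedge_counter = 0; };

struct MethodSketch {
  MethodCountersSketch* _method_counters = nullptr;

  MethodCountersSketch* get_method_counters() {
    if (_method_counters == nullptr) {
      // Stands in for InterpreterRuntime::build_method_counters(); may fail.
      _method_counters = new (std::nothrow) MethodCountersSketch();
    }
    return _method_counters;  // nullptr => caller skips the counter update
  }
};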
} -void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address be_counter (G5_method, Method::backedge_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset()); - Address inv_counter(G5_method, Method::invocation_counter_offset() + + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()); -#else - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ + int delta = InvocationCounter::count_increment; // Load each counter in a register ld( be_counter, Rtmp ); diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp index bf84848aa61..1a7901526ad 100644 --- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp +++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -263,8 +263,9 @@ class InterpreterMacroAssembler: public MacroAssembler { void compute_stack_base( Register Rdest ); #endif /* CC_INTERP */ - void increment_invocation_counter( Register Rtmp, Register Rtmp2 ); - void increment_backedge_counter( Register Rtmp, Register Rtmp2 ); + void get_method_counters(Register method, Register Rcounters, Label& skip); + void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); + void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); #ifndef CC_INTERP void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp ); diff --git a/hotspot/src/cpu/sparc/vm/sparc.ad b/hotspot/src/cpu/sparc/vm/sparc.ad index 96accce0256..ebbce2e7741 100644 --- a/hotspot/src/cpu/sparc/vm/sparc.ad +++ b/hotspot/src/cpu/sparc/vm/sparc.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -1655,53 +1655,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const { return ra_->C->scratch_emit_size(this); } -//============================================================================= - -// emit call stub, compiled java to interpretor -void emit_java_to_interp(CodeBuffer &cbuf ) { - - // Stub is fixed up when the corresponding call is converted from calling - // compiled code to calling interpreted code. 
- // set (empty), G5 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark)); - - __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode())); - - __ set_inst_mark(); - AddressLiteral addrlit(-1); - __ JUMP(addrlit, G3, 0); - - __ delayed()->nop(); - - // Update current stubs pointer and restore code_end. - __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - // This doesn't need to be accurate but it must be larger or equal to - // the real size of the stub. - return (NativeMovConstReg::instruction_size + // sethi/setlo; - NativeJump::instruction_size + // sethi; jmp; nop - (TraceJumps ? 20 * BytesPerInstWord : 0) ); -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call -} - - //============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { @@ -2576,15 +2529,15 @@ encode %{ enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine // who we intended to call. - if ( !_method ) { + if (!_method) { emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); } else if (_optimized_virtual) { emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); } else { emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); } - if( _method ) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. + CompiledStaticCall::emit_to_interp_stub(cbuf); } %} @@ -8223,10 +8176,25 @@ instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} - ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) ); - ins_pipe( cadd_cmpltmask ); + ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); + ins_pipe(cadd_cmpltmask); %} +instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ + match(Set p (AndI (CmpLTMask p q) y)); + effect(KILL ccr); + ins_cost(DEFAULT_COST*3); + + format %{ "CMP $p,$q\n\t" + "MOV $y,$p\n\t" + "MOVge G0,$p" %} + ins_encode %{ + __ cmp($p$$Register, $q$$Register); + __ mov($y$$Register, $p$$Register); + __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); + %} + ins_pipe(ialu_reg_reg_ialu); +%} //----------------------------------------------------------------- // Direct raw moves between float and general registers using VIS3. diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp index 4a6372ae0de..d8281cadaec 100644 --- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
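// Editorial illustration, not part of the patch: the and_cmpLTMask instruct
// added to sparc.ad above matches AndI(CmpLTMask(p, q), y). CmpLTMask yields
// an all-ones mask when p < q and zero otherwise, so the whole expression is
// simply "p < q ? y : 0", which is what the emitted cmp/mov/movge sequence
// computes. Scalar C++ equivalent (function names are mine):

inline int cmp_lt_mask(int p, int q) { return p < q ? -1 : 0; }

inline int and_cmp_lt_mask(int p, int q, int y) {
  return cmp_lt_mask(p, q) & y;  // -1 & y == y, 0 & y == 0
}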
* * This code is free software; you can redistribute it and/or modify it @@ -292,11 +292,15 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state) // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + // Note: In tiered we increment either counters in MethodCounters* or in + // MDO depending if we're profiling or not. + const Register Rcounters = G3_scratch; + Label done; + if (TieredCompilation) { const int increment = InvocationCounter::count_increment; const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // If no method data exists, go to profile_continue. __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch); @@ -311,23 +315,26 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ ba_short(done); } - // Increment counter in Method* + // Increment counter in MethodCounters* __ bind(no_mdo); - Address invocation_counter(Lmethod, - in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Address invocation_counter(Rcounters, + in_bytes(MethodCounters::invocation_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ get_method_counters(Lmethod, Rcounters, done); __ increment_mask_and_jump(invocation_counter, increment, mask, - G3_scratch, Lscratch, + G4_scratch, Lscratch, Assembler::zero, overflow); __ bind(done); } else { // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - Address interpreter_invocation_counter(Lmethod,in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ get_method_counters(Lmethod, Rcounters, done); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } if (ProfileInterpreter && profile_method != NULL) { @@ -345,6 +352,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance __ delayed()->nop(); + __ bind(done); } } diff --git a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp index 01d593fb034..00d1079742b 100644 --- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp @@ -63,6 +63,13 @@ static void do_oop_store(InterpreterMacroAssembler* _masm, noreg /* pre_val */, tmp, true /*preserve_o_regs*/); + // G1 barrier needs uncompressed oop for region cross check. 
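// (Editorial note, not part of the patch: G1's post-barrier filters out stores
//  whose destination and new value lie in the same heap region, conceptually
//  (((uintptr_t)field ^ (uintptr_t)new_val) >> log_region_size) == 0, and it
//  also skips NULL new values. That comparison needs the full-width oop, so
//  when UseCompressedOops is in use the code below keeps an uncompressed copy
//  in `tmp` and passes that to g1_write_barrier_post() as new_val.)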
+ Register new_val = val; + if (UseCompressedOops && val != G0) { + new_val = tmp; + __ mov(val, new_val); + } + if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code"); __ store_heap_oop(val, base, offset); @@ -79,7 +86,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm, __ add(base, index, base); } } - __ g1_write_barrier_post(base, val, tmp); + __ g1_write_barrier_post(base, new_val, tmp); } } break; @@ -1604,9 +1611,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Normal (non-jsr) branch handling // Save the current Lbcp - const Register O0_cur_bcp = O0; - __ mov( Lbcp, O0_cur_bcp ); - + const Register l_cur_bcp = Lscratch; + __ mov( Lbcp, l_cur_bcp ); bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; if ( increment_invocation_counter_for_backward_branches ) { @@ -1616,6 +1622,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Bump bytecode pointer by displacement (take the branch) __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr + const Register Rcounters = G3_scratch; + __ get_method_counters(Lmethod, Rcounters, Lforward); + if (TieredCompilation) { Label Lno_mdo, Loverflow; int increment = InvocationCounter::count_increment; @@ -1628,21 +1637,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Increment backedge counter in the MDO Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch, + __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0, Assembler::notZero, &Lforward); __ ba_short(Loverflow); } - // If there's no MDO, increment counter in Method* + // If there's no MDO, increment counter in MethodCounters* __ bind(Lno_mdo); - Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch, + Address backedge_counter(Rcounters, + in_bytes(MethodCounters::backedge_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0, Assembler::notZero, &Lforward); __ bind(Loverflow); // notify point for loop, pass branch bytecode - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp); // Was an OSR adapter generated? 
// O0 = osr nmethod @@ -1679,15 +1689,15 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { } else { // Update Backedge branch separately from invocations const Register G4_invoke_ctr = G4; - __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); + __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch); if (ProfileInterpreter) { __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward); if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch); } } else { if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch); } } } diff --git a/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp b/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp new file mode 100644 index 00000000000..957695fdc32 --- /dev/null +++ b/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/safepoint.hpp" + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. 
+ +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +#define __ _masm. +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + // Stub is fixed up when the corresponding call is converted from + // calling compiled code to calling interpreted code. + // movq rbx, 0 + // jmp -5 # to self + + address mark = cbuf.insts_mark(); // Get mark within main instrs section. + + // Note that the code buffer's insts_mark is always relative to insts. + // That's why we must use the macroassembler to generate a stub. + MacroAssembler _masm(&cbuf); + + address base = + __ start_a_stub(to_interp_stub_size()*2); + if (base == NULL) return; // CodeBuffer::expand failed. + // Static stub relocation stores the instruction address of the call. + __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand); + // Static stub relocation also tags the Method* in the code-stream. + __ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time. + // This is recognized as unresolved by relocs/nativeinst/ic code. + __ jump(RuntimeAddress(__ pc())); + + // Update current stubs pointer and restore insts_end. + __ end_a_stub(); +} +#undef __ + +int CompiledStaticCall::to_interp_stub_size() { + return NOT_LP64(10) // movl; jmp + LP64_ONLY(15); // movq (1+1+8); jmp (1+4) +} + +// Relocation entries for call stub, compiled java to interpreter. +int CompiledStaticCall::reloc_to_interp_stub() { + return 4; // 3 in emit_to_interp_stub + 1 in emit_call +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + instruction_address(), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_holder->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. 
+ address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + method_holder->set_data(0); + jump->set_jump_destination((address)-1); +} + +//----------------------------------------------------------------------------- +// Non-product mode code +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + // Verify call. + NativeCall::verify(); + if (os::is_MP()) { + verify_alignment(); + } + + // Verify stub. + address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} + +#endif // !PRODUCT diff --git a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp index 24e6694082e..08db8e074d0 100644 --- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp +++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -570,20 +570,28 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { + Label done; + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounter::backedge_counter_offset() + + InvocationCounter::counter_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); - const Address backedge_counter (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset()); + __ get_method_counters(rbx, rax, done); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter - + __ movl(rcx, invocation_counter); __ increment(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -593,7 +601,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); - + __ bind(done); } void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { @@ -977,7 +985,6 @@ address 
InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -1029,8 +1036,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { } #endif - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread // Since at this point in the method invocation the exception handler diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp index 78d97a975b6..0e7f483a649 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -266,6 +266,20 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp index 6dada56de0d..49a41f61d54 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp index 890826310a8..88d57d046a5 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -271,6 +271,20 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp index 66a001366ff..8bfc44fe4bc 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -111,6 +111,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp index fb13a44045a..ca3ab92f2d0 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -344,13 +344,13 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + Label done; + // Note: In tiered we increment either counters in MethodCounters* or in MDO + // depending if we're profiling or not. if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? __ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -360,26 +360,41 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); - __ jmpb(done); + __ jmp(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in rcx). 
- __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, + rcx, false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter (rbx, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } - // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + // Update standard invocation counters + __ movl(rcx, invocation_counter); __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -399,6 +414,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -868,7 +884,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -897,9 +912,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { // NULL oop temp (mirror or jni oop result) __ push((int32_t)NULL_WORD); - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame - generate_fixed_frame(true); // make sure method is native & not abstract @@ -1286,7 +1299,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset()); @@ -1326,7 +1338,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { __ bind(exit); } - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame 
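// Editorial illustration, not part of the patch: in the non-tiered path
// rewritten above, the entry code loads the counters from MethodCounters*,
// bumps the invocation counter, then adds the masked backedge counter and
// compares the sum against InterpreterInvocationLimit. The arithmetic as a
// small self-contained sketch (names are illustrative):

struct CountersSketch { unsigned invocation; unsigned backedge; };

inline bool bump_and_check_overflow(CountersSketch& c, unsigned count_increment,
                                    unsigned count_mask_value, unsigned limit) {
  c.invocation += count_increment;                     // save new invocation count
  unsigned combined = (c.backedge & count_mask_value)  // mask out the status bits
                    + c.invocation;                    // add both counters
  return combined >= limit;                            // jcc(aboveEqual, *overflow)
}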
generate_fixed_frame(false); diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp index 6b3f7b6ba26..4ad577ca4c9 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -300,13 +300,12 @@ void InterpreterGenerator::generate_counter_incr( Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Label done; // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? __ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -316,28 +315,39 @@ void InterpreterGenerator::generate_counter_incr( const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset())); __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); - __ jmpb(done); + __ jmp(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in ecx). 
- __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, + false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter(rbx, - Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter(rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx, - Method::interpreter_invocation_counter_offset())); + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter - + __ movl(rcx, invocation_counter); __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -354,6 +364,7 @@ void InterpreterGenerator::generate_counter_incr( __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -843,9 +854,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method:: - invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod:: size_of_parameters_offset()); @@ -876,10 +884,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { // (static native method holder mirror/jni oop result) __ push((int) NULL_WORD); - if (inc_counter) { - __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - } - // initialize fixed part of activation frame generate_fixed_frame(true); @@ -1296,9 +1300,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod(rbx, Method::const_offset()); - const Address invocation_counter(rbx, - Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags(rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); @@ -1343,10 +1344,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { __ bind(exit); } - // (pre-)fetch invocation count - if (inc_counter) { - __ movl(rcx, invocation_counter); - } // initialize fixed part of activation frame generate_fixed_frame(false); diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp index 
371517c0537..6c6cc4f9474 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1546,9 +1546,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ get_method(rcx); // ECX holds method __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count - const ByteSize be_offset = Method::backedge_counter_offset() + InvocationCounter::counter_offset(); - const ByteSize inv_offset = Method::invocation_counter_offset() + InvocationCounter::counter_offset(); - const int method_offset = frame::interpreter_frame_method_offset * wordSize; + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset(); + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset(); // Load up EDX with the branch displacement __ movl(rdx, at_bcp(1)); @@ -1596,6 +1597,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; @@ -1613,16 +1630,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ jmp(dispatch); } __ bind(no_mdo); - // Increment backedge counter in Method* + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, rax, false, Assembler::zero, &backedge_counter_overflow); } else { // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits __ addl(rax, Address(rcx, be_offset)); // add both counters diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp index d1e22d25990..e97c5386c7a 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -158,14 +158,19 @@ static void do_oop_store(InterpreterMacroAssembler* _masm, if (val == noreg) { __ store_heap_oop_null(Address(rdx, 0)); } else { + // G1 barrier needs uncompressed oop for region cross check. + Register new_val = val; + if (UseCompressedOops) { + new_val = rbx; + __ movptr(new_val, val); + } __ store_heap_oop(Address(rdx, 0), val); __ g1_write_barrier_post(rdx /* store_adr */, - val /* new_val */, + new_val /* new_val */, r15_thread /* thread */, r8 /* tmp */, rbx /* tmp2 */); } - } break; #endif // INCLUDE_ALL_GCS @@ -1564,11 +1569,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx // holds bumped taken count - const ByteSize be_offset = Method::backedge_counter_offset() + + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset(); - const ByteSize inv_offset = Method::invocation_counter_offset() + + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset(); - const int method_offset = frame::interpreter_frame_method_offset * wordSize; // Load up edx with the branch displacement __ movl(rdx, at_bcp(1)); @@ -1618,6 +1622,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // r14: locals pointer __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch + + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; @@ -1635,16 +1655,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ jmp(dispatch); } __ bind(no_mdo); - // Increment backedge counter in Method* + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, rax, false, Assembler::zero, &backedge_counter_overflow); } else { // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits __ addl(rax, Address(rcx, be_offset)); // add both counters diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad index 67f33d3ba27..316ebe2911d 100644 --- a/hotspot/src/cpu/x86/vm/x86_32.ad +++ b/hotspot/src/cpu/x86/vm/x86_32.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -1256,43 +1256,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const { } } -//============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer &cbuf ) { - // Stub is fixed up when the corresponding call is converted from calling - // compiled code to calling interpreted code. - // mov rbx,0 - // jmp -1 - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*)NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeInst/ic code - __ jump(RuntimeAddress(__ pc())); - - __ end_a_stub(); - // Update current stubs pointer and restore insts_end. -} -// size of call stub, compiled java to interpretor -uint size_java_to_interp() { - return 10; // movl; jmp -} -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() { - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - //============================================================================= #ifndef PRODUCT void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { @@ -1909,8 +1872,8 @@ encode %{ emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), static_call_Relocation::spec(), RELOC_IMM32 ); } - if (_method) { // Emit stub for static call - emit_java_to_interp(cbuf); + if (_method) { // Emit stub for static call. 
+ CompiledStaticCall::emit_to_interp_stub(cbuf); } %} @@ -2317,30 +2280,6 @@ encode %{ emit_rm(cbuf, 0x3, $p$$reg, tmpReg); %} - enc_class enc_cmpLTP_mem(rRegI p, rRegI q, memory mem, eCXRegI tmp) %{ // cadd_cmpLT - int tmpReg = $tmp$$reg; - - // SUB $p,$q - emit_opcode(cbuf,0x2B); - emit_rm(cbuf, 0x3, $p$$reg, $q$$reg); - // SBB $tmp,$tmp - emit_opcode(cbuf,0x1B); - emit_rm(cbuf, 0x3, tmpReg, tmpReg); - // AND $tmp,$y - cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand - emit_opcode(cbuf,0x23); - int reg_encoding = tmpReg; - int base = $mem$$base; - int index = $mem$$index; - int scale = $mem$$scale; - int displace = $mem$$disp; - relocInfo::relocType disp_reloc = $mem->disp_reloc(); - encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc); - // ADD $p,$tmp - emit_opcode(cbuf,0x03); - emit_rm(cbuf, 0x3, $p$$reg, tmpReg); - %} - enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{ // TEST shift,32 emit_opcode(cbuf,0xF7); @@ -8922,9 +8861,9 @@ instruct convP2B( rRegI dst, eRegP src, eFlagsReg cr ) %{ %} %} -instruct cmpLTMask( eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr ) %{ +instruct cmpLTMask(eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr) %{ match(Set dst (CmpLTMask p q)); - effect( KILL cr ); + effect(KILL cr); ins_cost(400); // SETlt can only use low byte of EAX,EBX, ECX, or EDX as destination @@ -8932,50 +8871,83 @@ instruct cmpLTMask( eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr ) %{ "CMP $p,$q\n\t" "SETlt $dst\n\t" "NEG $dst" %} - ins_encode( OpcRegReg(0x33,dst,dst), - OpcRegReg(0x3B,p,q), - setLT_reg(dst), neg_reg(dst) ); - ins_pipe( pipe_slow ); + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Rd = $dst$$Register; + Label done; + __ xorl(Rd, Rd); + __ cmpl(Rp, Rq); + __ setb(Assembler::less, Rd); + __ negl(Rd); + %} + + ins_pipe(pipe_slow); %} -instruct cmpLTMask0( rRegI dst, immI0 zero, eFlagsReg cr ) %{ +instruct cmpLTMask0(rRegI dst, immI0 zero, eFlagsReg cr) %{ match(Set dst (CmpLTMask dst zero)); - effect( DEF dst, KILL cr ); + effect(DEF dst, KILL cr); ins_cost(100); - format %{ "SAR $dst,31" %} - opcode(0xC1, 0x7); /* C1 /7 ib */ - ins_encode( RegOpcImm( dst, 0x1F ) ); - ins_pipe( ialu_reg ); + format %{ "SAR $dst,31\t# cmpLTMask0" %} + ins_encode %{ + __ sarl($dst$$Register, 31); + %} + ins_pipe(ialu_reg); %} - -instruct cadd_cmpLTMask( ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp, eFlagsReg cr ) %{ +/* better to save a register than avoid a branch */ +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); - effect( KILL tmp, KILL cr ); + effect(KILL cr); ins_cost(400); - // annoyingly, $tmp has no edges so you cant ask for it in - // any format or encoding - format %{ "SUB $p,$q\n\t" - "SBB ECX,ECX\n\t" - "AND ECX,$y\n\t" - "ADD $p,ECX" %} - ins_encode( enc_cmpLTP(p,q,y,tmp) ); - ins_pipe( pipe_cmplt ); + format %{ "SUB $p,$q\t# cadd_cmpLTMask\n\t" + "JGE done\n\t" + "ADD $p,$y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ subl(Rp, Rq); + __ jccb(Assembler::greaterEqual, done); + __ addl(Rp, Ry); + __ bind(done); + %} + + ins_pipe(pipe_cmplt); +%} + +/* better to save a register than avoid a branch */ +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{ + match(Set y (AndI (CmpLTMask p q) y)); + effect(KILL cr); + + ins_cost(300); + + format %{ "CMPL $p, $q\t# and_cmpLTMask\n\t" + "JLT done\n\t" + 
"XORL $y, $y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ cmpl(Rp, Rq); + __ jccb(Assembler::less, done); + __ xorl(Ry, Ry); + __ bind(done); + %} + + ins_pipe(pipe_cmplt); %} /* If I enable this, I encourage spilling in the inner loop of compress. -instruct cadd_cmpLTMask_mem( ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr ) %{ +instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q))); - effect( USE_KILL tmp, KILL cr ); - ins_cost(400); - - format %{ "SUB $p,$q\n\t" - "SBB ECX,ECX\n\t" - "AND ECX,$y\n\t" - "ADD $p,ECX" %} - ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); -%} */ //----------Long Instructions------------------------------------------------ diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad index a9b5820e273..e47976270cd 100644 --- a/hotspot/src/cpu/x86/vm/x86_64.ad +++ b/hotspot/src/cpu/x86/vm/x86_64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -1387,48 +1387,6 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const return (offset < 0x80) ? 5 : 8; // REX } -//============================================================================= - -// emit call stub, compiled java to interpreter -void emit_java_to_interp(CodeBuffer& cbuf) -{ - // Stub is fixed up when the corresponding call is converted from - // calling compiled code to calling interpreted code. - // movq rbx, 0 - // jmp -5 # to self - - address mark = cbuf.insts_mark(); // get mark within main instrs section - - // Note that the code buffer's insts_mark is always relative to insts. - // That's why we must use the macroassembler to generate a stub. - MacroAssembler _masm(&cbuf); - - address base = - __ start_a_stub(Compile::MAX_stubs_size); - if (base == NULL) return; // CodeBuffer::expand failed - // static stub relocation stores the instruction address of the call - __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64); - // static stub relocation also tags the Method* in the code-stream. - __ mov_metadata(rbx, (Metadata*) NULL); // method is zapped till fixup time - // This is recognized as unresolved by relocs/nativeinst/ic code - __ jump(RuntimeAddress(__ pc())); - - // Update current stubs pointer and restore insts_end. - __ end_a_stub(); -} - -// size of call stub, compiled java to interpretor -uint size_java_to_interp() -{ - return 15; // movq (1+1+8); jmp (1+4) -} - -// relocation entries for call stub, compiled java to interpretor -uint reloc_java_to_interp() -{ - return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call -} - //============================================================================= #ifndef PRODUCT void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const @@ -2078,8 +2036,8 @@ encode %{ RELOC_DISP32); } if (_method) { - // Emit stub for static call - emit_java_to_interp(cbuf); + // Emit stub for static call. 
+ CompiledStaticCall::emit_to_interp_stub(cbuf); } %} @@ -2222,12 +2180,6 @@ encode %{ $$$emit32$src$$constant; %} - enc_class Con64(immL src) - %{ - // Output immediate - emit_d64($src$$constant); - %} - enc_class Con32F_as_bits(immF src) %{ // Output Float immediate bits @@ -7608,7 +7560,7 @@ instruct xaddI( memory mem, rRegI newval, rFlagsReg cr) %{ ins_pipe( pipe_cmpxchg ); %} -instruct xaddL_no_res( memory mem, Universe dummy, immL add, rFlagsReg cr) %{ +instruct xaddL_no_res( memory mem, Universe dummy, immL32 add, rFlagsReg cr) %{ predicate(n->as_LoadStore()->result_not_used()); match(Set dummy (GetAndAddL mem add)); effect(KILL cr); @@ -9434,7 +9386,7 @@ instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr) match(Set dst (CmpLTMask p q)); effect(KILL cr); - ins_cost(400); // XXX + ins_cost(400); format %{ "cmpl $p, $q\t# cmpLTMask\n\t" "setlt $dst\n\t" "movzbl $dst, $dst\n\t" @@ -9452,37 +9404,63 @@ instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr) match(Set dst (CmpLTMask dst zero)); effect(KILL cr); - ins_cost(100); // XXX + ins_cost(100); format %{ "sarl $dst, #31\t# cmpLTMask0" %} - opcode(0xC1, 0x7); /* C1 /7 ib */ - ins_encode(reg_opc_imm(dst, 0x1F)); + ins_encode %{ + __ sarl($dst$$Register, 31); + %} ins_pipe(ialu_reg); %} - -instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr) +/* Better to save a register than avoid a branch */ +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); - effect(TEMP tmp, KILL cr); - - ins_cost(400); // XXX - format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t" - "sbbl $tmp, $tmp\n\t" - "andl $tmp, $y\n\t" - "addl $p, $tmp" %} + effect(KILL cr); + ins_cost(300); + format %{ "subl $p,$q\t# cadd_cmpLTMask\n\t" + "jge done\n\t" + "addl $p,$y\n" + "done: " %} ins_encode %{ Register Rp = $p$$Register; Register Rq = $q$$Register; Register Ry = $y$$Register; - Register Rt = $tmp$$Register; + Label done; __ subl(Rp, Rq); - __ sbbl(Rt, Rt); - __ andl(Rt, Ry); - __ addl(Rp, Rt); + __ jccb(Assembler::greaterEqual, done); + __ addl(Rp, Ry); + __ bind(done); %} ins_pipe(pipe_cmplt); %} +/* Better to save a register than avoid a branch */ +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) +%{ + match(Set y (AndI (CmpLTMask p q) y)); + effect(KILL cr); + + ins_cost(300); + + format %{ "cmpl $p, $q\t# and_cmpLTMask\n\t" + "jlt done\n\t" + "xorl $y, $y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ cmpl(Rp, Rq); + __ jccb(Assembler::less, done); + __ xorl(Ry, Ry); + __ bind(done); + %} + ins_pipe(pipe_cmplt); +%} + + //---------- FP Instructions------------------------------------------------ instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) diff --git a/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp b/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp new file mode 100644 index 00000000000..6fa39ea078b --- /dev/null +++ b/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/systemDictionary.hpp" +#include "code/codeCache.hpp" +#include "code/compiledIC.hpp" +#include "code/icBuffer.hpp" +#include "code/nmethod.hpp" +#include "code/vtableStubs.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/linkResolver.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/oopFactory.hpp" +#include "oops/method.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "runtime/icache.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "utilities/events.hpp" + + +// Release the CompiledICHolder* associated with this call site is there is one. +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + if (is_icholder_entry(call->destination())) { + NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); + } +} + +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { + // This call site might have become stale so inspect it carefully. + NativeCall* call = nativeCall_at(call_site->addr()); + return is_icholder_entry(call->destination()); +} + +//----------------------------------------------------------------------------- +// High-level access to an inline cache. Guaranteed to be MT-safe. + +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} + +// ---------------------------------------------------------------------------- + +void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +int CompiledStaticCall::to_interp_stub_size() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +// Relocation entries for call stub, compiled java to interpreter. 
+int CompiledStaticCall::reloc_to_interp_stub() { + ShouldNotReachHere(); // Only needed for COMPILER2. + return 0; +} + +void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +//----------------------------------------------------------------------------- +// Non-product mode code. +#ifndef PRODUCT + +void CompiledStaticCall::verify() { + ShouldNotReachHere(); // Only needed for COMPILER2. +} + +#endif // !PRODUCT diff --git a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp index a21245afdc1..21d979f3efb 100644 --- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp +++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ #define PR_MODEL_LP64 2 #ifdef COMPILER1 -#if defined(DEBUG) || defined(FASTDEBUG) +#ifdef ASSERT /* * To avoid the most part of potential link errors @@ -84,7 +84,7 @@ address StubRoutines::_call_stub_return_address = NULL; StubQueue* AbstractInterpreter::_code = NULL; -#endif /* defined(DEBUG) || defined(FASTDEBUG) */ +#endif /* ASSERT */ #endif /* COMPILER1 */ #define GEN_OFFS(Type,Name) \ diff --git a/hotspot/src/os/bsd/vm/os_bsd.cpp b/hotspot/src/os/bsd/vm/os_bsd.cpp index fb09dd57e83..f0b32196f5d 100644 --- a/hotspot/src/os/bsd/vm/os_bsd.cpp +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp @@ -1230,10 +1230,6 @@ bool os::dll_build_name(char* buffer, size_t buflen, return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; @@ -2080,9 +2076,10 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) { flags |= MAP_FIXED; } - // Map uncommitted pages PROT_READ and PROT_WRITE, change access - // to PROT_EXEC if executable when we commit the page. - addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE, + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. 
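// Illustrative sketch (not from this changeset): the reserve/commit split the
// comment above relies on, expressed as plain POSIX calls. All names and sizes
// below are invented for the example; HotSpot's own mmap flags differ per platform.
#include <sys/mman.h>
#include <stddef.h>

static char* reserve_then_commit(size_t reserved_bytes, size_t committed_bytes) {
  // Reserve address space only: PROT_NONE makes any premature touch fault at once
  // instead of silently succeeding against swap-backed memory.
  char* base = (char*)::mmap(NULL, reserved_bytes, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON, -1, 0);
  if (base == (char*)MAP_FAILED) return NULL;
  // Committing a leading sub-range later makes just that range usable.
  if (::mprotect(base, committed_bytes, PROT_READ | PROT_WRITE) != 0) return NULL;
  return base;
}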
+ addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); if (addr != MAP_FAILED) { diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp index 74b9cd2a1de..4ca47019569 100644 --- a/hotspot/src/os/linux/vm/os_linux.cpp +++ b/hotspot/src/os/linux/vm/os_linux.cpp @@ -119,6 +119,7 @@ int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL; Mutex* os::Linux::_createThread_lock = NULL; pthread_t os::Linux::_main_thread; int os::Linux::_page_size = -1; +const int os::Linux::_vm_default_page_size = (8 * K); bool os::Linux::_is_floating_stack = false; bool os::Linux::_is_NPTL = false; bool os::Linux::_supports_fast_thread_cpu_time = false; @@ -1662,10 +1663,6 @@ bool os::dll_build_name(char* buffer, size_t buflen, return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; @@ -2906,9 +2903,10 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) { flags |= MAP_FIXED; } - // Map uncommitted pages PROT_READ and PROT_WRITE, change access - // to PROT_EXEC if executable when we commit the page. - addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE, + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. + addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0); if (addr != MAP_FAILED) { @@ -4249,6 +4247,15 @@ void os::init(void) { Linux::clock_init(); initial_time_count = os::elapsed_counter(); pthread_mutex_init(&dl_mutex, NULL); + + // If the pagesize of the VM is greater than 8K determine the appropriate + // number of initial guard pages. The user can change this with the + // command line arguments, if needed. + if (vm_page_size() > (int)Linux::vm_default_page_size()) { + StackYellowPages = 1; + StackRedPages = 1; + StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size(); + } } // To install functions for atexit system call @@ -4302,8 +4309,8 @@ jint os::init_2(void) // Add in 2*BytesPerWord times page size to account for VM stack during // class initialization depending on 32 or 64 bit VM. 
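// Worked example for the guard-page rescaling in os::init() above (illustrative
// numbers only; 20 shadow pages is an assumed value, not a quoted default):
//   vm_page_size() = 64K, vm_default_page_size() = 8K
//   StackShadowPages = round_to(20 * 8K, 64K) / 64K = 192K / 64K = 3
// so the shadow zone keeps roughly its 8K-page byte size (~160K-192K) instead of
// inflating to 20 * 64K = 1.25M, and the yellow/red zones shrink to one 64K page each.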
os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed, - (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ - 2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size()); + (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() + + (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size()); size_t threadStackSizeInBytes = ThreadStackSize * K; if (threadStackSizeInBytes != 0 && diff --git a/hotspot/src/os/linux/vm/os_linux.hpp b/hotspot/src/os/linux/vm/os_linux.hpp index c09be025c12..c2ce765bc4b 100644 --- a/hotspot/src/os/linux/vm/os_linux.hpp +++ b/hotspot/src/os/linux/vm/os_linux.hpp @@ -70,6 +70,7 @@ class Linux { static pthread_t _main_thread; static Mutex* _createThread_lock; static int _page_size; + static const int _vm_default_page_size; static julong available_memory(); static julong physical_memory() { return _physical_memory; } @@ -116,6 +117,8 @@ class Linux { static int page_size(void) { return _page_size; } static void set_page_size(int val) { _page_size = val; } + static int vm_default_page_size(void) { return _vm_default_page_size; } + static address ucontext_get_pc(ucontext_t* uc); static intptr_t* ucontext_get_sp(ucontext_t* uc); static intptr_t* ucontext_get_fp(ucontext_t* uc); diff --git a/hotspot/src/os/posix/vm/os_posix.cpp b/hotspot/src/os/posix/vm/os_posix.cpp index 03302783688..e2d7c2d7f7f 100644 --- a/hotspot/src/os/posix/vm/os_posix.cpp +++ b/hotspot/src/os/posix/vm/os_posix.cpp @@ -251,3 +251,11 @@ bool os::has_allocatable_memory_limit(julong* limit) { return true; #endif } + +const char* os::get_current_directory(char *buf, size_t buflen) { + return getcwd(buf, buflen); +} + +FILE* os::open(int fd, const char* mode) { + return ::fdopen(fd, mode); +} diff --git a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp index 1a269539695..3fb13e5e97c 100644 --- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp +++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,14 +55,14 @@ #include "utilities/accessFlags.hpp" #include "utilities/globalDefinitions.hpp" #ifdef COMPILER1 -#if defined(DEBUG) || defined(FASTDEBUG) +#ifdef ASSERT /* * To avoid the most part of potential link errors * we link this program with -z nodefs . * * But for 'debug1' and 'fastdebug1' we still have to provide - * a particular workaround for the following symbols bellow. + * a particular workaround for the following symbols below. * It will be good to find out a generic way in the future. 
*/ @@ -79,7 +79,7 @@ address StubRoutines::_call_stub_return_address = NULL; StubQueue* AbstractInterpreter::_code = NULL; -#endif /* defined(DEBUG) || defined(FASTDEBUG) */ +#endif /* ASSERT */ #endif /* COMPILER1 */ #define GEN_OFFS(Type,Name) \ diff --git a/hotspot/src/os/solaris/vm/os_solaris.cpp b/hotspot/src/os/solaris/vm/os_solaris.cpp index dc6b8d4919a..09204157d65 100644 --- a/hotspot/src/os/solaris/vm/os_solaris.cpp +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp @@ -824,7 +824,7 @@ void os::init_system_properties_values() { // allocate new buffer and initialize info = (Dl_serinfo*)malloc(_info.dls_size); if (info == NULL) { - vm_exit_out_of_memory(_info.dls_size, + vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR, "init_system_properties_values info"); } info->dls_size = _info.dls_size; @@ -866,7 +866,7 @@ void os::init_system_properties_values() { common_path = malloc(bufsize); if (common_path == NULL) { free(info); - vm_exit_out_of_memory(bufsize, + vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR, "init_system_properties_values common_path"); } sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch); @@ -879,7 +879,7 @@ void os::init_system_properties_values() { if (library_path == NULL) { free(info); free(common_path); - vm_exit_out_of_memory(bufsize, + vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR, "init_system_properties_values library_path"); } library_path[0] = '\0'; @@ -1623,7 +1623,8 @@ void os::thread_local_storage_at_put(int index, void* value) { // %%% this is used only in threadLocalStorage.cpp if (thr_setspecific((thread_key_t)index, value)) { if (errno == ENOMEM) { - vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space"); + vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR, + "thr_setspecific: out of swap space"); } else { fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " "(%s)", strerror(errno))); @@ -1915,10 +1916,6 @@ bool os::dll_build_name(char* buffer, size_t buflen, return retval; } -const char* os::get_current_directory(char *buf, int buflen) { - return getcwd(buf, buflen); -} - // check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr; diff --git a/hotspot/src/os/windows/vm/chaitin_windows.cpp b/hotspot/src/os/windows/vm/chaitin_windows.cpp deleted file mode 100644 index bae10b3b5c4..00000000000 --- a/hotspot/src/os/windows/vm/chaitin_windows.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -// Disallow the use of the frame pointer (EBP) for implicit null exceptions -// on win95/98. If we do not do this, the OS gets confused and gives a stack -// error. -void PhaseRegAlloc::pd_preallocate_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->end(); - if (block_end->is_MachNullCheck() && - block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit null check. - // Fix its input so that it does not load into the frame pointer. - _matcher.pd_implicit_null_fixup(block_end->in(1)->as_Mach(), - block_end->as_MachNullCheck()->_vidx); - } - } - } -#else - // WIN64==itanium on XP -#endif -} - -#ifdef ASSERT -// Verify that no implicit null check uses the frame pointer (EBP) as -// its register on win95/98. Use of the frame pointer in an implicit -// null check confuses the OS, yielding a stack error. -void PhaseRegAlloc::pd_postallocate_verify_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->_nodes[block->_nodes.size()-1]; - if (block_end->is_MachNullCheck() && block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit - // null check. Verify that this instruction does not - // use the frame pointer. - int reg = get_reg_first(block_end->in(1)->in(block_end->as_MachNullCheck()->_vidx)); - assert(reg != EBP_num, - "implicit null check using frame pointer on win95/98"); - } - } - } -#else - // WIN64==itanium on XP -#endif -} -#endif diff --git a/hotspot/src/os/windows/vm/os_windows.cpp b/hotspot/src/os/windows/vm/os_windows.cpp index 0a8034d3217..dfcb64dd99b 100644 --- a/hotspot/src/os/windows/vm/os_windows.cpp +++ b/hotspot/src/os/windows/vm/os_windows.cpp @@ -1221,8 +1221,10 @@ bool os::dll_build_name(char *buffer, size_t buflen, // Needs to be in os specific directory because windows requires another // header file -const char* os::get_current_directory(char *buf, int buflen) { - return _getcwd(buf, buflen); +const char* os::get_current_directory(char *buf, size_t buflen) { + int n = static_cast<int>(buflen); + if (buflen > INT_MAX) n = INT_MAX; + return _getcwd(buf, n); } //----------------------------------------------------------- @@ -4098,6 +4100,10 @@ int os::open(const char *path, int oflag, int mode) { return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); } +FILE* os::open(int fd, const char* mode) { + return ::_fdopen(fd, mode); +} + // Is a (classpath) directory empty? 
bool os::dir_is_empty(const char* path) { WIN32_FIND_DATA fd; @@ -4238,9 +4244,6 @@ char * os::native_path(char *path) { path[3] = '\0'; } - #ifdef DEBUG - jio_fprintf(stderr, "sysNativePath: %s\n", path); - #endif DEBUG return path; } diff --git a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp index cf8634f90cb..d97f0e041bf 100644 --- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp +++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp @@ -178,7 +178,7 @@ static void current_stack_region(address* bottom, size_t* size) { // JVM needs to know exact stack location, abort if it fails if (rslt != 0) { if (rslt == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } diff --git a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp index 5cdab45d4cb..4fbea466ced 100644 --- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp +++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp @@ -710,7 +710,7 @@ static void current_stack_region(address * bottom, size_t * size) { // JVM needs to know exact stack location, abort if it fails if (rslt != 0) { if (rslt == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } diff --git a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp index 74ec81369ae..de0ba9a6ad5 100644 --- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp +++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp @@ -313,7 +313,7 @@ static void current_stack_region(address *bottom, size_t *size) { int res = pthread_getattr_np(pthread_self(), &attr); if (res != 0) { if (res == ENOMEM) { - vm_exit_out_of_memory(0, "pthread_getattr_np"); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); } else { fatal(err_msg("pthread_getattr_np failed with errno = %d", res)); diff --git a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp index f0db469941a..78902e1b477 100644 --- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp +++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp @@ -591,7 +591,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, // on the thread stack, which could get a mapping error when touched. address addr = (address) info->si_addr; if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) { - vm_exit_out_of_memory(0, "Out of swap space to map in thread stack."); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack."); } VMError err(t, sig, pc, info, ucVoid); diff --git a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp index d954063c296..51846441ecd 100644 --- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp +++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp @@ -745,7 +745,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, // on the thread stack, which could get a mapping error when touched. 
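// Illustrative call shapes (not from the surrounding hunk; sizes and messages
// are invented): the vm_exit_out_of_memory() calls touched in this change name
// the failure category explicitly, e.g.
//   vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR,   "map thread stack");
//   vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "native allocation");
// so error reporting can distinguish mmap/reserve failures from malloc failures.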
address addr = (address) info->si_addr; if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) { - vm_exit_out_of_memory(0, "Out of swap space to map in thread stack."); + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack."); } VMError err(t, sig, pc, info, ucVoid); diff --git a/hotspot/src/share/tools/hsdis/Makefile b/hotspot/src/share/tools/hsdis/Makefile index 3cfdf57e6bd..2a48f9a81d8 100644 --- a/hotspot/src/share/tools/hsdis/Makefile +++ b/hotspot/src/share/tools/hsdis/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Single gnu makefile for solaris, linux and windows (windows requires cygwin and mingw) @@ -66,7 +66,7 @@ ARCH=i386 endif CC = $(MINGW)-gcc CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW) -else #linux +else #linux CPU = $(shell uname -m) ARCH1=$(CPU:x86_64=amd64) ARCH=$(ARCH1:i686=i386) @@ -116,7 +116,6 @@ OUTFLAGS += -o $@ else #Windows OS = windows CC = gcc -#CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG" CFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- CFLAGS += LIBARCH=\"$(LIBARCH)\" DLDFLAGS += /dll /subsystem:windows /incremental:no \ diff --git a/hotspot/src/share/vm/adlc/main.cpp b/hotspot/src/share/vm/adlc/main.cpp index 8ecce63dd78..4b812316381 100644 --- a/hotspot/src/share/vm/adlc/main.cpp +++ b/hotspot/src/share/vm/adlc/main.cpp @@ -213,6 +213,7 @@ int main(int argc, char *argv[]) AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name)); AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp"); AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp"); + AD.addInclude(AD._CPP_file, "code/compiledIC.hpp"); AD.addInclude(AD._CPP_file, "code/vmreg.hpp"); AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp"); AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp"); diff --git a/hotspot/src/share/vm/asm/assembler.cpp b/hotspot/src/share/vm/asm/assembler.cpp index d23c0f50f2e..b56e2828636 100644 --- a/hotspot/src/share/vm/asm/assembler.cpp +++ b/hotspot/src/share/vm/asm/assembler.cpp @@ -44,7 +44,7 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) { CodeSection* cs = code->insts(); cs->clear_mark(); // new assembler kills old mark if (cs->start() == NULL) { - vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s", + vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s", code->name())); } _code_section = cs; diff --git a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp index a4cda5f904f..b80b199c9c2 100644 --- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp +++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp @@ -938,5 +938,7 @@ void Canonicalizer::do_ProfileCall(ProfileCall* x) {} void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {} void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {} void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +#ifdef ASSERT void Canonicalizer::do_Assert(Assert* x) {} +#endif void Canonicalizer::do_MemBar(MemBar* x) {} diff --git a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp 
b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp index b8bcfd7e65f..9e34ac79a31 100644 --- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp +++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp @@ -108,7 +108,9 @@ class Canonicalizer: InstructionVisitor { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP diff --git a/hotspot/src/share/vm/c1/c1_Instruction.hpp b/hotspot/src/share/vm/c1/c1_Instruction.hpp index b93525bf502..6b1f6ddd380 100644 --- a/hotspot/src/share/vm/c1/c1_Instruction.hpp +++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp @@ -111,7 +111,9 @@ class ProfileInvoke; class RuntimeCall; class MemBar; class RangeCheckPredicate; +#ifdef ASSERT class Assert; +#endif // A Value is a reference to the instruction creating the value typedef Instruction* Value; diff --git a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp index 4c88e50cb21..cfca00ab277 100644 --- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp +++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp @@ -871,12 +871,14 @@ void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) { } } +#ifdef ASSERT void InstructionPrinter::do_Assert(Assert* x) { output()->print("assert "); print_value(x->x()); output()->print(" %s ", cond_name(x->cond())); print_value(x->y()); } +#endif void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { print_unsafe_object_op(x, "UnsafePrefetchWrite"); diff --git a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp index d8d6502ebd6..8c80b6c7507 100644 --- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp +++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp @@ -136,7 +136,9 @@ class InstructionPrinter: public InstructionVisitor { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; #endif // PRODUCT diff --git a/hotspot/src/share/vm/c1/c1_LIR.cpp b/hotspot/src/share/vm/c1/c1_LIR.cpp index df0828ee555..f26d1812c44 100644 --- a/hotspot/src/share/vm/c1/c1_LIR.cpp +++ b/hotspot/src/share/vm/c1/c1_LIR.cpp @@ -1778,7 +1778,9 @@ const char * LIR_Op::name() const { // LIR_OpProfileCall case lir_profile_call: s = "profile_call"; break; // LIR_OpAssert +#ifdef ASSERT case lir_assert: s = "assert"; break; +#endif case lir_none: ShouldNotReachHere();break; default: s = "illegal_op"; break; } @@ -2025,12 +2027,14 @@ void LIR_OpLock::print_instr(outputStream* out) const { out->print("[lbl:0x%x]", stub()->entry()); } +#ifdef ASSERT void LIR_OpAssert::print_instr(outputStream* out) const { print_condition(out, condition()); out->print(" "); in_opr1()->print(out); out->print(" "); in_opr2()->print(out); out->print(", \""); out->print(msg()); out->print("\""); } +#endif void LIR_OpDelay::print_instr(outputStream* out) const { diff --git a/hotspot/src/share/vm/c1/c1_LIR.hpp b/hotspot/src/share/vm/c1/c1_LIR.hpp index 5bd0e57d6f9..61dd59e3fe9 100644 --- a/hotspot/src/share/vm/c1/c1_LIR.hpp +++ b/hotspot/src/share/vm/c1/c1_LIR.hpp @@ -881,8 +881,9 @@ class LIR_OpLock; class LIR_OpTypeCheck; class LIR_OpCompareAndSwap; class LIR_OpProfileCall; +#ifdef ASSERT class LIR_OpAssert; - +#endif // LIR operation codes enum LIR_Code { @@ -1139,7 +1140,9 
@@ class LIR_Op: public CompilationResourceObj { virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } +#ifdef ASSERT virtual LIR_OpAssert* as_OpAssert() { return NULL; } +#endif virtual void verify() const {} }; diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp index ffb2965fdfc..abb4914fbfa 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3044,21 +3044,20 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, assert(level > CompLevel_simple, "Shouldn't be here"); int offset = -1; - LIR_Opr counter_holder = new_register(T_METADATA); - LIR_Opr meth; + LIR_Opr counter_holder; if (level == CompLevel_limited_profile) { - offset = in_bytes(backedge ? Method::backedge_counter_offset() : - Method::invocation_counter_offset()); - __ metadata2reg(method->constant_encoding(), counter_holder); - meth = counter_holder; + address counters_adr = method->ensure_method_counters(); + counter_holder = new_pointer_register(); + __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); + offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : + MethodCounters::invocation_counter_offset()); } else if (level == CompLevel_full_profile) { + counter_holder = new_register(T_METADATA); offset = in_bytes(backedge ? 
MethodData::backedge_counter_offset() : MethodData::invocation_counter_offset()); ciMethodData* md = method->method_data_or_null(); assert(md != NULL, "Sanity"); __ metadata2reg(md->constant_encoding(), counter_holder); - meth = new_register(T_METADATA); - __ metadata2reg(method->constant_encoding(), meth); } else { ShouldNotReachHere(); } @@ -3069,6 +3068,8 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, __ store(result, counter); if (notify) { LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT); + LIR_Opr meth = new_register(T_METADATA); + __ metadata2reg(method->constant_encoding(), meth); __ logical_and(result, mask, result); __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); // The bci for info can point to cmp for if's we want the if bci @@ -3103,8 +3104,8 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) { } } -void LIRGenerator::do_Assert(Assert *x) { #ifdef ASSERT +void LIRGenerator::do_Assert(Assert *x) { ValueTag tag = x->x()->type()->tag(); If::Condition cond = x->cond(); @@ -3124,9 +3125,8 @@ void LIRGenerator::do_Assert(Assert *x) { LIR_Opr right = yin->result(); __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true); -#endif } - +#endif void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) { diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp index 4c70a9f64fd..d3c76865dbd 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp @@ -537,7 +537,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; diff --git a/hotspot/src/share/vm/c1/c1_Optimizer.cpp b/hotspot/src/share/vm/c1/c1_Optimizer.cpp index 74e9d2240db..90dc2797210 100644 --- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp +++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp @@ -535,7 +535,9 @@ public: void do_RuntimeCall (RuntimeCall* x); void do_MemBar (MemBar* x); void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT void do_Assert (Assert* x); +#endif }; @@ -718,8 +720,9 @@ void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {} void NullCheckVisitor::do_MemBar (MemBar* x) {} void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +#ifdef ASSERT void NullCheckVisitor::do_Assert (Assert* x) {} - +#endif void NullCheckEliminator::visit(Value* p) { assert(*p != NULL, "should not find NULL instructions"); diff --git a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.cpp b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.cpp index 40c448a39ed..12cdce62fee 100644 --- a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.cpp +++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.cpp @@ -459,7 +459,7 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList // Iterate over all different indices if (_optimistic) { - for (int i=0; i id()]; assert(info != NULL, "Info must not be null"); @@ -531,9 +531,7 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList remove_range_check(ai); } } - _access_indexed_info[index_instruction->id()] = NULL; } - indices.clear(); if (list_constant.length() > 1) { AccessIndexed *first = list_constant.at(0); @@ -560,6 +558,13 @@ void 
RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList } } } + + // Clear data structures for next array + for (int i = 0; i < indices.length(); i++) { + Instruction *index_instruction = indices.at(i); + _access_indexed_info[index_instruction->id()] = NULL; + } + indices.clear(); } } diff --git a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp index af6d9d94815..ae1a2556881 100644 --- a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp +++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp @@ -166,7 +166,9 @@ public: void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ }; +#ifdef ASSERT void do_Assert (Assert* x) { /* nothing to do */ }; +#endif }; #ifdef ASSERT diff --git a/hotspot/src/share/vm/c1/c1_ValueMap.hpp b/hotspot/src/share/vm/c1/c1_ValueMap.hpp index c76ef46bef4..820d1909efa 100644 --- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp +++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp @@ -207,7 +207,9 @@ class ValueNumberingVisitor: public InstructionVisitor { void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ }; +#ifdef ASSERT void do_Assert (Assert* x) { /* nothing to do */ }; +#endif }; diff --git a/hotspot/src/share/vm/ci/ciEnv.cpp b/hotspot/src/share/vm/ci/ciEnv.cpp index 6dff1e9396e..7776db5eb6d 100644 --- a/hotspot/src/share/vm/ci/ciEnv.cpp +++ b/hotspot/src/share/vm/ci/ciEnv.cpp @@ -483,7 +483,8 @@ ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool, { // We have to lock the cpool to keep the oop from being resolved // while we are accessing it. 
- MonitorLockerEx ml(cpool->lock()); + oop cplock = cpool->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); constantTag tag = cpool->tag_at(index); if (tag.is_klass()) { // The klass has been inserted into the constant pool @@ -1149,23 +1150,9 @@ void ciEnv::record_out_of_memory_failure() { record_method_not_compilable("out of memory"); } -fileStream* ciEnv::_replay_data_stream = NULL; - -void ciEnv::dump_replay_data() { +void ciEnv::dump_replay_data(outputStream* out) { VM_ENTRY_MARK; MutexLocker ml(Compile_lock); - if (_replay_data_stream == NULL) { - _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile); - if (_replay_data_stream == NULL) { - fatal(err_msg("Can't open %s for replay data", ReplayDataFile)); - } - } - dump_replay_data(_replay_data_stream); -} - - -void ciEnv::dump_replay_data(outputStream* out) { - ASSERT_IN_VM; ResourceMark rm; #if INCLUDE_JVMTI out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables); @@ -1178,13 +1165,15 @@ void ciEnv::dump_replay_data(outputStream* out) { for (int i = 0; i < objects->length(); i++) { objects->at(i)->dump_replay_data(out); } - Method* method = task()->method(); - int entry_bci = task()->osr_bci(); + CompileTask* task = this->task(); + Method* method = task->method(); + int entry_bci = task->osr_bci(); + int comp_level = task->comp_level(); // Klass holder = method->method_holder(); - out->print_cr("compile %s %s %s %d", + out->print_cr("compile %s %s %s %d %d", method->klass_name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - entry_bci); + entry_bci, comp_level); out->flush(); } diff --git a/hotspot/src/share/vm/ci/ciEnv.hpp b/hotspot/src/share/vm/ci/ciEnv.hpp index 042bc32fa00..45dd42eb25e 100644 --- a/hotspot/src/share/vm/ci/ciEnv.hpp +++ b/hotspot/src/share/vm/ci/ciEnv.hpp @@ -46,8 +46,6 @@ class ciEnv : StackObj { friend class CompileBroker; friend class Dependencies; // for get_object, during logging - static fileStream* _replay_data_stream; - private: Arena* _arena; // Alias for _ciEnv_arena except in init_shared_objects() Arena _ciEnv_arena; @@ -451,10 +449,6 @@ public: // RedefineClasses support void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); } - // Dump the compilation replay data for this ciEnv to - // ReplayDataFile, creating the file if needed. - void dump_replay_data(); - // Dump the compilation replay data for the ciEnv to the stream. void dump_replay_data(outputStream* out); }; diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp index 646de740f8f..780f4ad868f 100644 --- a/hotspot/src/share/vm/ci/ciMethod.cpp +++ b/hotspot/src/share/vm/ci/ciMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -904,6 +904,20 @@ ciMethodData* ciMethod::method_data_or_null() { return md; } +// ------------------------------------------------------------------ +// ciMethod::ensure_method_counters +// +address ciMethod::ensure_method_counters() { + check_is_loaded(); + VM_ENTRY_MARK; + methodHandle mh(THREAD, get_Method()); + MethodCounters *counter = mh->method_counters(); + if (counter == NULL) { + counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL); + } + return (address)counter; +} + // ------------------------------------------------------------------ // ciMethod::should_exclude // @@ -1191,13 +1205,14 @@ void ciMethod::dump_replay_data(outputStream* st) { ASSERT_IN_VM; ResourceMark rm; Method* method = get_Method(); + MethodCounters* mcs = method->method_counters(); Klass* holder = method->method_holder(); st->print_cr("ciMethod %s %s %s %d %d %d %d %d", holder->name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - method->invocation_counter()->raw_counter(), - method->backedge_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->invocation_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->backedge_counter()->raw_counter(), interpreter_invocation_count(), interpreter_throwout_count(), _instructions_size); diff --git a/hotspot/src/share/vm/ci/ciMethod.hpp b/hotspot/src/share/vm/ci/ciMethod.hpp index cbef39b3cdc..46ea500b551 100644 --- a/hotspot/src/share/vm/ci/ciMethod.hpp +++ b/hotspot/src/share/vm/ci/ciMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -196,7 +196,6 @@ class ciMethod : public ciMetadata { // Analysis and profiling. // // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks. 
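// Illustrative sketch (not from the changeset): the lazy MethodCounters
// allocation pattern that the x86 interpreter stubs and
// ciMethod::ensure_method_counters above both follow, written as plain C++.
// The helper name and the CHECK_NULL usage are assumptions for the example.
static MethodCounters* counters_for(methodHandle mh, TRAPS) {
  MethodCounters* mcs = mh->method_counters();
  if (mcs == NULL) {
    // Allocated on first use; this can fail under memory pressure, so callers
    // must tolerate a NULL result (the template interpreter branches back to
    // 'dispatch' in that case).
    mcs = Method::build_method_counters(mh(), CHECK_NULL);
  }
  return mcs;
}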
- bool uses_monitors() const { return _uses_monitors; } // this one should go away, it has a misleading name bool has_monitor_bytecodes() const { return _uses_monitors; } bool has_balanced_monitors(); @@ -262,6 +261,7 @@ class ciMethod : public ciMetadata { bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const; bool check_call(int refinfo_index, bool is_static) const; bool ensure_method_data(); // make sure it exists in the VM also + address ensure_method_counters(); int instructions_size(); int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC diff --git a/hotspot/src/share/vm/ci/ciReplay.cpp b/hotspot/src/share/vm/ci/ciReplay.cpp index 030d93db7c3..a314f35f821 100644 --- a/hotspot/src/share/vm/ci/ciReplay.cpp +++ b/hotspot/src/share/vm/ci/ciReplay.cpp @@ -89,7 +89,7 @@ class CompileReplay : public StackObj { loader = Handle(thread, SystemDictionary::java_system_loader()); stream = fopen(filename, "rt"); if (stream == NULL) { - fprintf(stderr, "Can't open replay file %s\n", filename); + fprintf(stderr, "ERROR: Can't open replay file %s\n", filename); } buffer_length = 32; buffer = NEW_RESOURCE_ARRAY(char, buffer_length); @@ -327,7 +327,6 @@ class CompileReplay : public StackObj { if (had_error()) { tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message); tty->print_cr("%s", buffer); - assert(false, "error"); return; } pos = 0; @@ -370,11 +369,47 @@ class CompileReplay : public StackObj { } } - // compile + // validation of comp_level + bool is_valid_comp_level(int comp_level) { + const int msg_len = 256; + char* msg = NULL; + if (!is_compile(comp_level)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level); + } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) { + msg = NEW_RESOURCE_ARRAY(char, msg_len); + switch (comp_level) { + case CompLevel_simple: + jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level); + break; + case CompLevel_full_optimization: + jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level); + break; + default: + jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level); + } + } + if (msg != NULL) { + report_error(msg); + return false; + } + return true; + } + + // compile void process_compile(TRAPS) { // methodHandle method; Method* method = parse_method(CHECK); int entry_bci = parse_int("entry_bci"); + const char* comp_level_label = "comp_level"; + int comp_level = parse_int(comp_level_label); + // old version w/o comp_level + if (had_error() && (error_message() == comp_level_label)) { + comp_level = CompLevel_full_optimization; + } + if (!is_valid_comp_level(comp_level)) { + return; + } Klass* k = method->method_holder(); ((InstanceKlass*)k)->initialize(THREAD); if (HAS_PENDING_EXCEPTION) { @@ -389,12 +424,12 @@ class CompileReplay : public StackObj { } } // Make sure the existence of a prior compile doesn't stop this one - nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code(); + nmethod* nm = (entry_bci != InvocationEntryBci) ? 
method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); if (nm != NULL) { nm->make_not_entrant(); } replay_state = this; - CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization, + CompileBroker::compile_method(method, entry_bci, comp_level, methodHandle(), 0, "replay", THREAD); replay_state = NULL; reset(); @@ -551,7 +586,7 @@ class CompileReplay : public StackObj { if (parsed_two_word == i) continue; default: - ShouldNotReachHere(); + fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value())); break; } @@ -819,6 +854,11 @@ int ciReplay::replay_impl(TRAPS) { ReplaySuppressInitializers = 1; } + if (FLAG_IS_DEFAULT(ReplayDataFile)) { + tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt)."); + return 1; + } + // Load and parse the replay data CompileReplay rp(ReplayDataFile, THREAD); int exit_code = 0; @@ -920,12 +960,17 @@ void ciReplay::initialize(ciMethod* m) { method->print_name(tty); tty->cr(); } else { + EXCEPTION_CONTEXT; + MethodCounters* mcs = method->method_counters(); // m->_instructions_size = rec->instructions_size; m->_instructions_size = -1; m->_interpreter_invocation_count = rec->interpreter_invocation_count; m->_interpreter_throwout_count = rec->interpreter_throwout_count; - method->invocation_counter()->_counter = rec->invocation_counter; - method->backedge_counter()->_counter = rec->backedge_counter; + if (mcs == NULL) { + mcs = Method::build_method_counters(method, CHECK_AND_CLEAR); + } + mcs->invocation_counter()->_counter = rec->invocation_counter; + mcs->backedge_counter()->_counter = rec->backedge_counter; } } diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp index ad11f2005df..c93d72de623 100644 --- a/hotspot/src/share/vm/classfile/classFileParser.cpp +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp @@ -436,14 +436,19 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) { ref_index, CHECK_(nullHandle)); break; case JVM_REF_invokeVirtual: - case JVM_REF_invokeStatic: - case JVM_REF_invokeSpecial: case JVM_REF_newInvokeSpecial: check_property( tag.is_method(), "Invalid constant pool index %u in class file %s (not a method)", ref_index, CHECK_(nullHandle)); break; + case JVM_REF_invokeStatic: + case JVM_REF_invokeSpecial: + check_property( + tag.is_method() || tag.is_interface_method(), + "Invalid constant pool index %u in class file %s (not a method)", + ref_index, CHECK_(nullHandle)); + break; case JVM_REF_invokeInterface: check_property( tag.is_interface_method(), @@ -2022,7 +2027,6 @@ methodHandle ClassFileParser::parse_method(bool is_interface, u2 method_parameters_length = 0; u1* method_parameters_data = NULL; bool method_parameters_seen = false; - bool method_parameters_four_byte_flags; bool parsed_code_attribute = false; bool parsed_checked_exceptions_attribute = false; bool parsed_stackmap_attribute = false; @@ -2236,26 +2240,14 @@ methodHandle ClassFileParser::parse_method(bool is_interface, } method_parameters_seen = true; method_parameters_length = cfs->get_u1_fast(); - // Track the actual size (note: this is written for clarity; a - // decent compiler will CSE and constant-fold this into a single - // expression) - // Use the attribute length to figure out the size of flags - if (method_attribute_length == (method_parameters_length * 6u) + 1u) { - method_parameters_four_byte_flags = true; - } else if (method_attribute_length == (method_parameters_length * 4u) + 1u) { - 
method_parameters_four_byte_flags = false; - } else { + if (method_attribute_length != (method_parameters_length * 4u) + 1u) { classfile_parse_error( "Invalid MethodParameters method attribute length %u in class file", method_attribute_length, CHECK_(nullHandle)); } method_parameters_data = cfs->get_u1_buffer(); cfs->skip_u2_fast(method_parameters_length); - if (method_parameters_four_byte_flags) { - cfs->skip_u4_fast(method_parameters_length); - } else { - cfs->skip_u2_fast(method_parameters_length); - } + cfs->skip_u2_fast(method_parameters_length); // ignore this attribute if it cannot be reflected if (!SystemDictionary::Parameter_klass_loaded()) method_parameters_length = 0; @@ -2418,13 +2410,8 @@ methodHandle ClassFileParser::parse_method(bool is_interface, for (int i = 0; i < method_parameters_length; i++) { elem[i].name_cp_index = Bytes::get_Java_u2(method_parameters_data); method_parameters_data += 2; - if (method_parameters_four_byte_flags) { - elem[i].flags = Bytes::get_Java_u4(method_parameters_data); - method_parameters_data += 4; - } else { - elem[i].flags = Bytes::get_Java_u2(method_parameters_data); - method_parameters_data += 2; - } + elem[i].flags = Bytes::get_Java_u2(method_parameters_data); + method_parameters_data += 2; } } @@ -3837,7 +3824,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, } if (TraceClassLoadingPreorder) { - tty->print("[Loading %s", name->as_klass_external_name()); + tty->print("[Loading %s", (name != NULL) ? name->as_klass_external_name() : "NoName"); if (cfs->source() != NULL) tty->print(" from %s", cfs->source()); tty->print_cr("]"); } diff --git a/hotspot/src/share/vm/classfile/classFileParser.hpp b/hotspot/src/share/vm/classfile/classFileParser.hpp index 21844054187..8f070747250 100644 --- a/hotspot/src/share/vm/classfile/classFileParser.hpp +++ b/hotspot/src/share/vm/classfile/classFileParser.hpp @@ -304,7 +304,19 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { inline void assert_property(bool b, const char* msg, TRAPS) { #ifdef ASSERT - if (!b) { fatal(msg); } + if (!b) { + ResourceMark rm(THREAD); + fatal(err_msg(msg, _class_name->as_C_string())); + } +#endif + } + + inline void assert_property(bool b, const char* msg, int index, TRAPS) { +#ifdef ASSERT + if (!b) { + ResourceMark rm(THREAD); + fatal(err_msg(msg, index, _class_name->as_C_string())); + } #endif } @@ -312,7 +324,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { if (_need_verify) { guarantee_property(property, msg, index, CHECK); } else { - assert_property(property, msg, CHECK); + assert_property(property, msg, index, CHECK); } } diff --git a/hotspot/src/share/vm/classfile/classLoader.cpp b/hotspot/src/share/vm/classfile/classLoader.cpp index 7e92f0661c8..650a299f346 100644 --- a/hotspot/src/share/vm/classfile/classLoader.cpp +++ b/hotspot/src/share/vm/classfile/classLoader.cpp @@ -1274,13 +1274,16 @@ void ClassLoader::compile_the_world() { Handle system_class_loader (THREAD, SystemDictionary::java_system_loader()); // Iterate over all bootstrap class path entries ClassPathEntry* e = _first_entry; + jlong start = os::javaTimeMillis(); while (e != NULL) { // We stop at rt.jar, unless it is the first bootstrap path entry if (e->is_rt_jar() && e != _first_entry) break; e->compile_the_world(system_class_loader, CATCH); e = e->next(); } - tty->print_cr("CompileTheWorld : Done"); + jlong end = os::javaTimeMillis(); + tty->print_cr("CompileTheWorld : Done (%d classes, %d methods, %d ms)", + _compile_the_world_class_counter, _compile_the_world_method_counter, (end - 
start)); { // Print statistics as if before normal exit: extern void print_statistics(); @@ -1289,7 +1292,8 @@ void ClassLoader::compile_the_world() { vm_exit(0); } -int ClassLoader::_compile_the_world_counter = 0; +int ClassLoader::_compile_the_world_class_counter = 0; +int ClassLoader::_compile_the_world_method_counter = 0; static int _codecache_sweep_counter = 0; // Filter out all exceptions except OOMs @@ -1311,8 +1315,8 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { // If the file has a period after removing .class, it's not really a // valid class file. The class loader will check everything else. if (strchr(buffer, '.') == NULL) { - _compile_the_world_counter++; - if (_compile_the_world_counter > CompileTheWorldStopAt) return; + _compile_the_world_class_counter++; + if (_compile_the_world_class_counter > CompileTheWorldStopAt) return; // Construct name without extension TempNewSymbol sym = SymbolTable::new_symbol(buffer, CHECK); @@ -1329,21 +1333,22 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { if (HAS_PENDING_EXCEPTION) { // If something went wrong in preloading we just ignore it clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer); + tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_class_counter, buffer); } } - if (_compile_the_world_counter >= CompileTheWorldStartAt) { + if (_compile_the_world_class_counter >= CompileTheWorldStartAt) { if (k.is_null() || exception_occurred) { // If something went wrong (e.g. ExceptionInInitializerError) we skip this class - tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); + tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_class_counter, buffer); } else { - tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer); + tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer); // Preload all classes to get around uncommon traps // Iterate over all methods in class + int comp_level = CompilationPolicy::policy()->initial_compile_level(); for (int n = 0; n < k->methods()->length(); n++) { methodHandle m (THREAD, k->methods()->at(n)); - if (CompilationPolicy::can_be_compiled(m)) { + if (CompilationPolicy::can_be_compiled(m, comp_level)) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { // Give sweeper a chance to keep up with CTW @@ -1352,11 +1357,13 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { _codecache_sweep_counter = 0; } // Force compilation - CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(), + CompileBroker::compile_method(m, InvocationEntryBci, comp_level, methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + } else { + _compile_the_world_method_counter++; } if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) { // Clobber the first compile and force second tier compilation @@ -1370,7 +1377,9 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { 
clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + } else { + _compile_the_world_method_counter++; } } } diff --git a/hotspot/src/share/vm/classfile/classLoader.hpp b/hotspot/src/share/vm/classfile/classLoader.hpp index aa68ed2d0c1..786914cad22 100644 --- a/hotspot/src/share/vm/classfile/classLoader.hpp +++ b/hotspot/src/share/vm/classfile/classLoader.hpp @@ -340,11 +340,12 @@ class ClassLoader: AllStatic { // Force compilation of all methods in all classes in bootstrap class path (stress test) #ifndef PRODUCT private: - static int _compile_the_world_counter; + static int _compile_the_world_class_counter; + static int _compile_the_world_method_counter; public: static void compile_the_world(); static void compile_the_world_in(char* name, Handle loader, TRAPS); - static int compile_the_world_counter() { return _compile_the_world_counter; } + static int compile_the_world_counter() { return _compile_the_world_class_counter; } #endif //PRODUCT }; diff --git a/hotspot/src/share/vm/classfile/classLoaderData.cpp b/hotspot/src/share/vm/classfile/classLoaderData.cpp index e20de3c252a..c030645f64b 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp @@ -53,6 +53,7 @@ #include "classfile/metadataOnStackMark.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" +#include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" @@ -65,17 +66,19 @@ ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; -ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) : +ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) : _class_loader(h_class_loader()), _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially _metaspace(NULL), _unloading(false), _klasses(NULL), _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL), - _next(NULL), _dependencies(), + _next(NULL), _dependencies(dependencies), _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) { // empty } void ClassLoaderData::init_dependencies(TRAPS) { + assert(!Universe::is_fully_initialized(), "should only be called when initializing"); + assert(is_the_null_class_loader_data(), "should only call this for the null class loader"); _dependencies.init(CHECK); } @@ -277,6 +280,9 @@ void ClassLoaderData::remove_class(Klass* scratch_class) { void ClassLoaderData::unload() { _unloading = true; + // Tell serviceability tools these classes are unloading + classes_do(InstanceKlass::notify_unload_class); + if (TraceClassLoaderData) { ResourceMark rm; tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this); @@ -300,6 +306,9 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const { ClassLoaderData::~ClassLoaderData() { + // Release C heap structures for all the classes. 
+ classes_do(InstanceKlass::release_C_heap_structures); + Metaspace *m = _metaspace; if (m != NULL) { _metaspace = NULL; @@ -423,7 +432,7 @@ void ClassLoaderData::free_deallocate_list() { // These anonymous class loaders are to contain classes used for JSR292 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) { // Add a new class loader data to the graph. - return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL); + return ClassLoaderDataGraph::add(loader, true, CHECK_NULL); } const char* ClassLoaderData::loader_name() { @@ -495,19 +504,22 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL; ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL; ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL; - // Add a new class loader data node to the list. Assign the newly created // ClassLoaderData into the java/lang/ClassLoader object as a hidden field -ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) { - // Not assigned a class loader data yet. - // Create one. - ClassLoaderData* *list_head = &_head; - ClassLoaderData* next = _head; +ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) { + // We need to allocate all the oops for the ClassLoaderData before allocating the + // actual ClassLoaderData object. + ClassLoaderData::Dependencies dependencies(CHECK_NULL); - bool is_anonymous = (cld_addr == NULL); - ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous); + No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the + // ClassLoaderData in the graph since the CLD + // contains unhandled oops - if (cld_addr != NULL) { + ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies); + + + if (!is_anonymous) { + ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader()); // First, Atomically set it ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL); if (old != NULL) { @@ -519,6 +531,9 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo // We won the race, and therefore the task of adding the data to the list of // class loader data + ClassLoaderData** list_head = &_head; + ClassLoaderData* next = _head; + do { cld->set_next(next); ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next); @@ -531,10 +546,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo cld->loader_name()); tty->print_cr("]"); } - // Create dependencies after the CLD is added to the list. Otherwise, - // the GC GC will not find the CLD and the _class_loader field will - // not be updated. - cld->init_dependencies(CHECK_NULL); return cld; } next = exchanged; @@ -665,6 +676,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) { dead->unload(); data = data->next(); // Remove from loader list. + // This class loader data will no longer be found + // in the ClassLoaderDataGraph. 
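Aside, not part of the changeset: the rewritten ClassLoaderDataGraph::add above publishes the new ClassLoaderData with Atomic::cmpxchg_ptr, first into the loader oop's field and then onto the head of the global list, retrying if another thread installed a different head in the meantime. A minimal self-contained sketch of that lock-free prepend using std::atomic (Node and list_head are hypothetical names, not the HotSpot types):

#include <atomic>
#include <cstdio>

struct Node {
  int   id;
  Node* next = nullptr;
};

std::atomic<Node*> list_head{nullptr};

// Lock-free prepend, mirroring the cmpxchg_ptr retry loop in
// ClassLoaderDataGraph::add: read the current head, link the new node
// in front of it, and publish only if the head has not changed.
void prepend(Node* n) {
  Node* expected = list_head.load();
  do {
    n->next = expected;
  } while (!list_head.compare_exchange_weak(expected, n));
}

int main() {
  Node a{1}, b{2};
  prepend(&a);
  prepend(&b);
  for (Node* p = list_head.load(); p != nullptr; p = p->next) {
    std::printf("%d\n", p->id);   // prints 2 then 1
  }
  return 0;
}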
if (prev != NULL) { prev->set_next(data); } else { @@ -686,6 +699,7 @@ void ClassLoaderDataGraph::purge() { next = purge_me->next(); delete purge_me; } + Metaspace::purge(); } // CDS support diff --git a/hotspot/src/share/vm/classfile/classLoaderData.hpp b/hotspot/src/share/vm/classfile/classLoaderData.hpp index e6315182e18..2a7e43082b2 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.hpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp @@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic { // CMS support. static ClassLoaderData* _saved_head; - static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS); + static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); @@ -100,6 +100,9 @@ class ClassLoaderData : public CHeapObj { Thread* THREAD); public: Dependencies() : _list_head(NULL) {} + Dependencies(TRAPS) : _list_head(NULL) { + init(CHECK); + } void add(Handle dependency, TRAPS); void init(TRAPS); void oops_do(OopClosure* f); @@ -150,7 +153,7 @@ class ClassLoaderData : public CHeapObj { void set_next(ClassLoaderData* next) { _next = next; } ClassLoaderData* next() const { return _next; } - ClassLoaderData(Handle h_class_loader, bool is_anonymous); + ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies); ~ClassLoaderData(); void set_metaspace(Metaspace* m) { _metaspace = m; } @@ -190,7 +193,9 @@ class ClassLoaderData : public CHeapObj { static void init_null_class_loader_data() { assert(_the_null_class_loader_data == NULL, "cannot initialize twice"); assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice"); - _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false); + + // We explicitly initialize the Dependencies object at a later phase in the initialization + _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies()); ClassLoaderDataGraph::_head = _the_null_class_loader_data; assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be"); if (DumpSharedSpaces) { diff --git a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp index b3a5ccf86d1..018b6761c50 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp @@ -43,10 +43,9 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP assert(loader() != NULL,"Must be a class loader"); // Gets the class loader data out of the java/lang/ClassLoader object, if non-null // it's already in the loader_data, so no need to add - ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader()); - ClassLoaderData* loader_data_id = *loader_data_addr; - if (loader_data_id) { - return loader_data_id; + ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader()); + if (loader_data) { + return loader_data; } - return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD); + return ClassLoaderDataGraph::add(loader, false, THREAD); } diff --git a/hotspot/src/share/vm/classfile/dictionary.cpp b/hotspot/src/share/vm/classfile/dictionary.cpp index f400a0c0e3f..92d9fb438fe 100644 --- a/hotspot/src/share/vm/classfile/dictionary.cpp +++ b/hotspot/src/share/vm/classfile/dictionary.cpp @@ -27,7 +27,6 @@ #include "classfile/systemDictionary.hpp" #include "oops/oop.inline.hpp" #include 
"prims/jvmtiRedefineClassesTrace.hpp" -#include "services/classLoadingService.hpp" #include "utilities/hashtable.inline.hpp" @@ -156,19 +155,7 @@ bool Dictionary::do_unloading() { if (k_def_class_loader_data == loader_data) { // This is the defining entry, so the referred class is about // to be unloaded. - // Notify the debugger and clean up the class. class_was_unloaded = true; - // notify the debugger - if (JvmtiExport::should_post_class_unload()) { - JvmtiExport::post_class_unload(ik); - } - - // notify ClassLoadingService of class unload - ClassLoadingService::notify_class_unloaded(ik); - - // Clean up C heap - ik->release_C_heap_structures(); - ik->constants()->release_C_heap_structures(); } // Also remove this system dictionary entry. purge_entry = true; diff --git a/hotspot/src/share/vm/classfile/genericSignatures.cpp b/hotspot/src/share/vm/classfile/genericSignatures.cpp index 3b0ffbd3349..33fbca051b7 100644 --- a/hotspot/src/share/vm/classfile/genericSignatures.cpp +++ b/hotspot/src/share/vm/classfile/genericSignatures.cpp @@ -268,8 +268,15 @@ ClassDescriptor* ClassDescriptor::parse_generic_signature( Klass* outer = SystemDictionary::find( outer_name, class_loader, protection_domain, CHECK_NULL); if (outer == NULL && !THREAD->is_Compiler_thread()) { - outer = SystemDictionary::resolve_super_or_fail(original_name, - outer_name, class_loader, protection_domain, false, CHECK_NULL); + if (outer_name == ik->super()->name()) { + outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name, + class_loader, protection_domain, + false, CHECK_NULL); + } + else { + outer = SystemDictionary::resolve_or_fail(outer_name, class_loader, + protection_domain, false, CHECK_NULL); + } } InstanceKlass* outer_ik; diff --git a/hotspot/src/share/vm/classfile/javaClasses.cpp b/hotspot/src/share/vm/classfile/javaClasses.cpp index fe03d3fb132..bb02a6d8694 100644 --- a/hotspot/src/share/vm/classfile/javaClasses.cpp +++ b/hotspot/src/share/vm/classfile/javaClasses.cpp @@ -315,14 +315,18 @@ Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jch return string; } -jchar* java_lang_String::as_unicode_string(oop java_string, int& length) { +jchar* java_lang_String::as_unicode_string(oop java_string, int& length, TRAPS) { typeArrayOop value = java_lang_String::value(java_string); int offset = java_lang_String::offset(java_string); length = java_lang_String::length(java_string); - jchar* result = NEW_RESOURCE_ARRAY(jchar, length); - for (int index = 0; index < length; index++) { - result[index] = value->char_at(index + offset); + jchar* result = NEW_RESOURCE_ARRAY_RETURN_NULL(jchar, length); + if (result != NULL) { + for (int index = 0; index < length; index++) { + result[index] = value->char_at(index + offset); + } + } else { + THROW_MSG_0(vmSymbols::java_lang_OutOfMemoryError(), "could not allocate Unicode string"); } return result; } @@ -2625,6 +2629,15 @@ Metadata* java_lang_invoke_MemberName::vmtarget(oop mname) { return (Metadata*)mname->address_field(_vmtarget_offset); } +#if INCLUDE_JVMTI +// Can be executed on VM thread only +void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) { + assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type"); + assert(Thread::current()->is_VM_thread(), "not VM thread"); + mname->address_field_put(_vmtarget_offset, (address)ref); +} +#endif // INCLUDE_JVMTI + void java_lang_invoke_MemberName::set_vmtarget(oop mname, Metadata* ref) { assert(is_instance(mname), "wrong 
type"); // check the type of the vmtarget diff --git a/hotspot/src/share/vm/classfile/javaClasses.hpp b/hotspot/src/share/vm/classfile/javaClasses.hpp index ac0f15e2cee..8e4dd46f3e9 100644 --- a/hotspot/src/share/vm/classfile/javaClasses.hpp +++ b/hotspot/src/share/vm/classfile/javaClasses.hpp @@ -153,7 +153,7 @@ class java_lang_String : AllStatic { static char* as_utf8_string(oop java_string, char* buf, int buflen); static char* as_utf8_string(oop java_string, int start, int len); static char* as_platform_dependent_str(Handle java_string, TRAPS); - static jchar* as_unicode_string(oop java_string, int& length); + static jchar* as_unicode_string(oop java_string, int& length, TRAPS); // produce an ascii string with all other values quoted using \u#### static char* as_quoted_ascii(oop java_string); @@ -1036,6 +1036,9 @@ class java_lang_invoke_MemberName: AllStatic { static Metadata* vmtarget(oop mname); static void set_vmtarget(oop mname, Metadata* target); +#if INCLUDE_JVMTI + static void adjust_vmtarget(oop mname, Metadata* target); +#endif // INCLUDE_JVMTI static intptr_t vmindex(oop mname); static void set_vmindex(oop mname, intptr_t index); diff --git a/hotspot/src/share/vm/classfile/stackMapFrame.hpp b/hotspot/src/share/vm/classfile/stackMapFrame.hpp index bdeb956ba68..237accec0d4 100644 --- a/hotspot/src/share/vm/classfile/stackMapFrame.hpp +++ b/hotspot/src/share/vm/classfile/stackMapFrame.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -175,14 +175,14 @@ class StackMapFrame : public ResourceObj { ErrorContext* ctx, TRAPS) const; inline void set_mark() { -#ifdef DEBUG +#ifdef ASSERT // Put bogus type to indicate it's no longer valid. 
if (_stack_mark != -1) { for (int i = _stack_mark - 1; i >= _stack_size; --i) { _stack[i] = VerificationType::bogus_type(); } } -#endif // def DEBUG +#endif // def ASSERT _stack_mark = _stack_size; } diff --git a/hotspot/src/share/vm/classfile/symbolTable.cpp b/hotspot/src/share/vm/classfile/symbolTable.cpp index 0f8da2d895e..b36432a3c10 100644 --- a/hotspot/src/share/vm/classfile/symbolTable.cpp +++ b/hotspot/src/share/vm/classfile/symbolTable.cpp @@ -735,7 +735,7 @@ oop StringTable::intern(oop string, TRAPS) ResourceMark rm(THREAD); int length; Handle h_string (THREAD, string); - jchar* chars = java_lang_String::as_unicode_string(string, length); + jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL); oop result = intern(h_string, chars, length, CHECK_NULL); return result; } diff --git a/hotspot/src/share/vm/code/codeBlob.cpp b/hotspot/src/share/vm/code/codeBlob.cpp index 6120d3535ae..a855ade0f5f 100644 --- a/hotspot/src/share/vm/code/codeBlob.cpp +++ b/hotspot/src/share/vm/code/codeBlob.cpp @@ -348,14 +348,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, void* RuntimeStub::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); + void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } // operator new shared by all singletons: void* SingletonBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); + void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } diff --git a/hotspot/src/share/vm/code/codeCache.cpp b/hotspot/src/share/vm/code/codeCache.cpp index d5b8f8f9a78..f7be307b254 100644 --- a/hotspot/src/share/vm/code/codeCache.cpp +++ b/hotspot/src/share/vm/code/codeCache.cpp @@ -172,7 +172,7 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) { static size_t maxCodeCacheUsed = 0; -CodeBlob* CodeCache::allocate(int size) { +CodeBlob* CodeCache::allocate(int size, bool is_critical) { // Do not seize the CodeCache lock here--if the caller has not // already done so, we are going to lose bigtime, since the code // cache will contain a garbage CodeBlob until the caller can @@ -183,7 +183,7 @@ CodeBlob* CodeCache::allocate(int size) { CodeBlob* cb = NULL; _number_of_blobs++; while (true) { - cb = (CodeBlob*)_heap->allocate(size); + cb = (CodeBlob*)_heap->allocate(size, is_critical); if (cb != NULL) break; if (!_heap->expand_by(CodeCacheExpansionSize)) { // Expansion failed @@ -192,8 +192,8 @@ CodeBlob* CodeCache::allocate(int size) { if (PrintCodeCacheExtension) { ResourceMark rm; tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", - (intptr_t)_heap->begin(), (intptr_t)_heap->end(), - (address)_heap->end() - (address)_heap->begin()); + (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(), + (address)_heap->high() - (address)_heap->low_boundary()); } } maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - @@ -463,8 +463,10 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) { } #endif //PRODUCT - -nmethod* CodeCache::find_and_remove_saved_code(Method* m) { +/** + * Remove and return nmethod from the saved code list in order to reanimate it. 
+ */ +nmethod* CodeCache::reanimate_saved_code(Method* m) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); nmethod* saved = _saved_nmethods; nmethod* prev = NULL; @@ -479,7 +481,7 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) { saved->set_speculatively_disconnected(false); saved->set_saved_nmethod_link(NULL); if (PrintMethodFlushing) { - saved->print_on(tty, " ### nmethod is reconnected\n"); + saved->print_on(tty, " ### nmethod is reconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; @@ -496,6 +498,9 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) { return NULL; } +/** + * Remove nmethod from the saved code list in order to discard it permanently + */ void CodeCache::remove_saved_code(nmethod* nm) { // For conc swpr this will be called with CodeCache_lock taken by caller assert_locked_or_safepoint(CodeCache_lock); @@ -529,7 +534,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) { nm->set_saved_nmethod_link(_saved_nmethods); _saved_nmethods = nm; if (PrintMethodFlushing) { - nm->print_on(tty, " ### nmethod is speculatively disconnected\n"); + nm->print_on(tty, " ### nmethod is speculatively disconnected"); } if (LogCompilation && (xtty != NULL)) { ttyLocker ttyl; @@ -608,13 +613,13 @@ void CodeCache::verify_oops() { address CodeCache::first_address() { assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->begin(); + return (address)_heap->low_boundary(); } address CodeCache::last_address() { assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->end(); + return (address)_heap->high(); } @@ -996,10 +1001,9 @@ void CodeCache::print() { void CodeCache::print_summary(outputStream* st, bool detailed) { size_t total = (_heap->high_boundary() - _heap->low_boundary()); st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT - "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT - "Kb max_free_chunk=" SIZE_FORMAT "Kb", + "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", total/K, (total - unallocated_capacity())/K, - maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K); + maxCodeCacheUsed/K, unallocated_capacity()/K); if (detailed) { st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", @@ -1018,19 +1022,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) { void CodeCache::log_state(outputStream* st) { st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" - " largest_free_block='" SIZE_FORMAT "'", + " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", nof_blobs(), nof_nmethods(), nof_adapters(), - unallocated_capacity(), largest_free_block()); + unallocated_capacity()); } -size_t CodeCache::largest_free_block() { - // This is called both with and without CodeCache_lock held so - // handle both cases. 
- if (CodeCache_lock->owned_by_self()) { - return _heap->largest_free_block(); - } else { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - return _heap->largest_free_block(); - } -} diff --git a/hotspot/src/share/vm/code/codeCache.hpp b/hotspot/src/share/vm/code/codeCache.hpp index e19aec61b79..38799019b8f 100644 --- a/hotspot/src/share/vm/code/codeCache.hpp +++ b/hotspot/src/share/vm/code/codeCache.hpp @@ -57,7 +57,7 @@ class CodeCache : AllStatic { static int _number_of_nmethods_with_dependencies; static bool _needs_cache_clean; static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() - static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_look() + static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods. static void verify_if_often() PRODUCT_RETURN; @@ -70,7 +70,7 @@ class CodeCache : AllStatic { static void initialize(); // Allocation/administration - static CodeBlob* allocate(int size); // allocates a new CodeBlob + static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled static int alignment_unit(); // guaranteed alignment of all CodeBlobs static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) @@ -156,25 +156,19 @@ class CodeCache : AllStatic { static address low_bound() { return (address) _heap->low_boundary(); } static address high_bound() { return (address) _heap->high_boundary(); } - static bool has_space(int size) { - // Always leave some room in the CodeCache for I2C/C2I adapters - return largest_free_block() > (CodeCacheMinimumFreeSpace + size); - } - // Profiling static address first_address(); // first address used for CodeBlobs static address last_address(); // last address used for CodeBlobs static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static size_t largest_free_block(); - static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; } + static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; } static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void clear_inline_caches(); // clear all inline caches - static nmethod* find_and_remove_saved_code(Method* m); + static nmethod* reanimate_saved_code(Method* m); static void remove_saved_code(nmethod* nm); static void speculatively_disconnect(nmethod* nm); diff --git a/hotspot/src/share/vm/code/compiledIC.cpp b/hotspot/src/share/vm/code/compiledIC.cpp index 85eeda47056..e9a63b86623 100644 --- a/hotspot/src/share/vm/code/compiledIC.cpp +++ b/hotspot/src/share/vm/code/compiledIC.cpp @@ -45,25 +45,6 @@ // Every time a compiled IC is changed or its type is being accessed, // either the CompiledIC_lock must be set or we must be at a safe point. - -// Release the CompiledICHolder* associated with this call site is there is one. -void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. 
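Aside, not part of the changeset: CodeCache::allocate now takes an is_critical flag, RuntimeStub/SingletonBlob allocate with is_critical = true, and has_space()/largest_free_block() are removed, so vital blobs (adapters, runtime stubs) can still be placed while ordinary requests fail first and callers handle a NULL result. A minimal self-contained sketch of the reserve idea (CodeArena, the sizes and malloc stand-in are hypothetical, not the HotSpot heap implementation):

#include <cstdio>
#include <cstdlib>

class CodeArena {
 public:
  CodeArena(size_t capacity, size_t critical_reserve)
      : _capacity(capacity), _reserve(critical_reserve), _used(0) {}

  // Non-critical requests must leave the reserve untouched;
  // critical ones may consume the whole arena.
  void* allocate(size_t size, bool is_critical = false) {
    size_t limit = is_critical ? _capacity : _capacity - _reserve;
    if (_used + size > limit) {
      return nullptr;            // caller must cope with failure
    }
    _used += size;
    return std::malloc(size);    // stand-in for carving space out of the arena
  }

 private:
  size_t _capacity;
  size_t _reserve;
  size_t _used;
};

int main() {
  CodeArena cache(1000, 200);                 // 200 bytes kept for critical blobs
  void* nm   = cache.allocate(900);           // fails: would eat the reserve
  void* stub = cache.allocate(900, true);     // succeeds: critical allocation
  std::printf("nmethod=%p stub=%p\n", nm, stub);
  std::free(stub);
  return 0;
}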
- NativeCall* call = nativeCall_at(call_site->addr()); - if (is_icholder_entry(call->destination())) { - NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value()); - InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data()); - } -} - - -bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. - NativeCall* call = nativeCall_at(call_site->addr()); - return is_icholder_entry(call->destination()); -} - - //----------------------------------------------------------------------------- // Low-level access to an inline cache. Private, since they might not be // MT-safe to use. @@ -488,33 +469,6 @@ bool CompiledIC::is_icholder_entry(address entry) { return (cb != NULL && cb->is_adapter_blob()); } - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // search for the ic_call at the given address - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; -} -} - - // ---------------------------------------------------------------------------- void CompiledStaticCall::set_to_clean() { @@ -549,33 +503,6 @@ bool CompiledStaticCall::is_call_to_interpreted() const { return nm->stub_contains(destination()); } - -void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) { - address stub=find_stub(); - guarantee(stub != NULL, "stub not found"); - - if (TraceICs) { - ResourceMark rm; - tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), - callee->name_and_sig_as_C_string()); - } - - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - - assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache"); - assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache"); - - // Update stub - method_holder->set_data((intptr_t)callee()); - jump->set_jump_destination(entry); - - // Update jump to call - set_destination_mt_safe(stub); -} - - void CompiledStaticCall::set(const StaticCallInfo& info) { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); @@ -618,19 +545,6 @@ void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) { } } - -void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { - assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); - // Reset stub - address stub = static_stub->addr(); - assert(stub!=NULL, "stub not found"); - NativeMovConstReg* method_holder = 
nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - method_holder->set_data(0); - jump->set_jump_destination((address)-1); -} - - address CompiledStaticCall::find_stub() { // Find reloc. information containing this call-site RelocIterator iter((nmethod*)NULL, instruction_address()); @@ -668,19 +582,16 @@ void CompiledIC::verify() { || is_optimized() || is_megamorphic(), "sanity check"); } - void CompiledIC::print() { print_compiled_ic(); tty->cr(); } - void CompiledIC::print_compiled_ic() { tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT, instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value()); } - void CompiledStaticCall::print() { tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address()); if (is_clean()) { @@ -693,21 +604,4 @@ void CompiledStaticCall::print() { tty->cr(); } -void CompiledStaticCall::verify() { - // Verify call - NativeCall::verify(); - if (os::is_MP()) { - verify_alignment(); - } - - // Verify stub - address stub = find_stub(); - assert(stub != NULL, "no stub found for static call"); - NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object - NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - - // Verify state - assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); -} - -#endif +#endif // !PRODUCT diff --git a/hotspot/src/share/vm/code/compiledIC.hpp b/hotspot/src/share/vm/code/compiledIC.hpp index 79c39742746..96cb0a582bb 100644 --- a/hotspot/src/share/vm/code/compiledIC.hpp +++ b/hotspot/src/share/vm/code/compiledIC.hpp @@ -304,6 +304,11 @@ class CompiledStaticCall: public NativeCall { friend CompiledStaticCall* compiledStaticCall_at(address native_call); friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); + // Code + static void emit_to_interp_stub(CodeBuffer &cbuf); + static int to_interp_stub_size(); + static int reloc_to_interp_stub(); + // State bool is_clean() const; bool is_call_to_compiled() const; diff --git a/hotspot/src/share/vm/code/nmethod.cpp b/hotspot/src/share/vm/code/nmethod.cpp index 55a2c05f5ff..60bc88b7734 100644 --- a/hotspot/src/share/vm/code/nmethod.cpp +++ b/hotspot/src/share/vm/code/nmethod.cpp @@ -501,18 +501,17 @@ nmethod* nmethod::new_native_nmethod(methodHandle method, { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(native_nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, - compile_id, &offsets, - code_buffer, frame_size, - basic_lock_owner_sp_offset, - basic_lock_sp_offset, oop_maps); - NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm)); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, + compile_id, &offsets, + code_buffer, frame_size, + basic_lock_owner_sp_offset, + basic_lock_sp_offset, oop_maps); + NOT_PRODUCT(if (nm != 
NULL) nmethod_stats.note_native_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -538,18 +537,17 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method, { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (nmethod_size) nmethod(method(), nmethod_size, - &offsets, code_buffer, frame_size); + nm = new (nmethod_size) nmethod(method(), nmethod_size, + &offsets, code_buffer, frame_size); - NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -591,16 +589,16 @@ nmethod* nmethod::new_nmethod(methodHandle method, + round_to(handler_table->size_in_bytes(), oopSize) + round_to(nul_chk_table->size_in_bytes(), oopSize) + round_to(debug_info->data_size() , oopSize); - if (CodeCache::has_space(nmethod_size)) { - nm = new (nmethod_size) - nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, - orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, - oop_maps, - handler_table, - nul_chk_table, - compiler, - comp_level); - } + + nm = new (nmethod_size) + nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, + orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, + oop_maps, + handler_table, + nul_chk_table, + compiler, + comp_level); + if (nm != NULL) { // To make dependency checking during class loading fast, record // the nmethod dependencies in the classes it is dependent on. @@ -612,15 +610,18 @@ nmethod* nmethod::new_nmethod(methodHandle method, // classes the slow way is too slow. 
for (Dependencies::DepStream deps(nm); deps.next(); ) { Klass* klass = deps.context_type(); - if (klass == NULL) continue; // ignore things like evol_method + if (klass == NULL) { + continue; // ignore things like evol_method + } // record this nmethod as dependent on this klass InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } } NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); - if (PrintAssembly && nm != NULL) + if (PrintAssembly && nm != NULL) { Disassembler::decode(nm); + } } // verify nmethod @@ -798,13 +799,11 @@ nmethod::nmethod( } #endif // def HAVE_DTRACE_H -void* nmethod::operator new(size_t size, int nmethod_size) { - void* alloc = CodeCache::allocate(nmethod_size); - guarantee(alloc != NULL, "CodeCache should have enough space"); - return alloc; +void* nmethod::operator new(size_t size, int nmethod_size) throw () { + // Not critical, may return null if there is too little continuous memory + return CodeCache::allocate(nmethod_size); } - nmethod::nmethod( Method* method, int nmethod_size, diff --git a/hotspot/src/share/vm/code/stubs.cpp b/hotspot/src/share/vm/code/stubs.cpp index 930c637fac9..a826a3550d3 100644 --- a/hotspot/src/share/vm/code/stubs.cpp +++ b/hotspot/src/share/vm/code/stubs.cpp @@ -67,7 +67,7 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size, intptr_t size = round_to(buffer_size, 2*BytesPerWord); BufferBlob* blob = BufferBlob::create(name, size); if( blob == NULL) { - vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name)); + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name)); } _stub_interface = stub_interface; _buffer_size = blob->content_size(); diff --git a/hotspot/src/share/vm/code/vtableStubs.cpp b/hotspot/src/share/vm/code/vtableStubs.cpp index df46601635a..b0807145330 100644 --- a/hotspot/src/share/vm/code/vtableStubs.cpp +++ b/hotspot/src/share/vm/code/vtableStubs.cpp @@ -60,7 +60,7 @@ void* VtableStub::operator new(size_t size, int code_size) { const int bytes = chunk_factor * real_size + pd_code_alignment(); BufferBlob* blob = BufferBlob::create("vtable chunks", bytes); if (blob == NULL) { - vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks"); } _chunk = blob->content_begin(); _chunk_end = _chunk + bytes; diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp index de3df50bdf4..086bff881cd 100644 --- a/hotspot/src/share/vm/compiler/compileBroker.cpp +++ b/hotspot/src/share/vm/compiler/compileBroker.cpp @@ -65,7 +65,7 @@ HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin, HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool); -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -77,8 +77,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, signature->bytes(), signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -92,7 +91,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, #else /* USDT2 */ -#define 
DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) \ +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -104,8 +103,7 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, (char *) signature->bytes(), signature->utf8_length()); \ } -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, \ - comp_name, success) \ +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \ { \ Symbol* klass_name = (method)->klass_name(); \ Symbol* name = (method)->name(); \ @@ -120,8 +118,8 @@ HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end, #else // ndef DTRACE_ENABLED -#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name) -#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success) +#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) +#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) #endif // ndef DTRACE_ENABLED @@ -1229,7 +1227,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, if (method->is_not_compilable(comp_level)) return NULL; if (UseCodeCacheFlushing) { - nmethod* saved = CodeCache::find_and_remove_saved_code(method()); + nmethod* saved = CodeCache::reanimate_saved_code(method()); if (saved != NULL) { method->set_code(method, saved); return saved; @@ -1288,9 +1286,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, method->jmethod_id(); } - // If the compiler is shut off due to code cache flushing or otherwise, + // If the compiler is shut off due to code cache getting full // fail out now so blocking compiles dont hang the java thread - if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) { + if (!should_compile_new_jobs()) { CompilationPolicy::policy()->delay_compilation(method()); return NULL; } @@ -1581,7 +1579,7 @@ void CompileBroker::compiler_thread_loop() { // We need this HandleMark to avoid leaking VM handles. HandleMark hm(thread); - if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) { + if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { // the code cache is really full handle_full_code_cache(); } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) { @@ -1766,8 +1764,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { // Save information about this method in case of failure. set_last_compile(thread, method, is_osr, task_level); - DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method, - compiler_name(task_level)); + DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); } // Allocate a new set of JNI handles. 
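Aside, not part of the changeset: nmethod::operator new above is now declared throw() and simply returns whatever CodeCache::allocate() gives back, so the new-expression in new_nmethod() can evaluate to NULL and the callers check for it instead of aborting with a guarantee. A minimal self-contained sketch of a non-throwing placement-style allocator whose callers must check for NULL (Arena and CompiledStub are hypothetical names):

#include <cstddef>
#include <cstdio>
#include <new>

struct Arena {
  char   buffer[64];
  size_t used = 0;
  void* allocate(std::size_t size) {
    if (used + size > sizeof(buffer)) return nullptr;   // out of space
    void* p = buffer + used;
    used += size;
    return p;
  }
};

struct CompiledStub {
  int id;
  explicit CompiledStub(int i) : id(i) {}

  // Non-throwing placement allocator: because it is declared noexcept,
  // a NULL return makes the whole new-expression evaluate to NULL and the
  // constructor is skipped, mirroring nmethod::operator new + throw().
  static void* operator new(std::size_t, Arena& a, std::size_t total) noexcept {
    return a.allocate(total);
  }
  static void operator delete(void*, Arena&, std::size_t) noexcept {}
};

int main() {
  Arena arena;
  CompiledStub* ok   = new (arena, sizeof(CompiledStub)) CompiledStub(1);
  CompiledStub* fail = new (arena, 1024) CompiledStub(2);   // arena too small
  std::printf("ok=%p fail=%p\n", (void*)ok, (void*)fail);   // fail is null
  return 0;
}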
@@ -1842,13 +1839,14 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { } } } + // simulate crash during compilation + assert(task->compile_id() != CICrashAt, "just as planned"); } pop_jni_handle_block(); methodHandle method(thread, task->method()); - DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method, - compiler_name(task_level), task->is_success()); + DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); collect_statistics(thread, time, task); diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index c609f10f620..ffb7531caac 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -2444,8 +2444,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { // initial marking in checkpointRootsInitialWork has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before initial mark: "); - Universe::verify(); + Universe::verify("Verify before initial mark: "); } { bool res = markFromRoots(false); @@ -2456,8 +2455,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { case FinalMarking: if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before re-mark: "); - Universe::verify(); + Universe::verify("Verify before re-mark: "); } checkpointRootsFinal(false, clear_all_soft_refs, init_mark_was_synchronous); @@ -2468,8 +2466,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { // final marking in checkpointRootsFinal has been completed if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before sweep: "); - Universe::verify(); + Universe::verify("Verify before sweep: "); } sweep(false); assert(_collectorState == Resizing, "Incorrect state"); @@ -2484,8 +2481,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { // The heap has been resized. if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - gclog_or_tty->print("Verify before reset: "); - Universe::verify(); + Universe::verify("Verify before reset: "); } reset(false); assert(_collectorState == Idling, "Collector state should " @@ -2853,8 +2849,8 @@ class VerifyMarkedClosure: public BitMapClosure { bool failed() { return _failed; } }; -bool CMSCollector::verify_after_remark() { - gclog_or_tty->print(" [Verifying CMS Marking... "); +bool CMSCollector::verify_after_remark(bool silent) { + if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... "); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; @@ -2915,7 +2911,7 @@ bool CMSCollector::verify_after_remark() { warning("Unrecognized value %d for CMSRemarkVerifyVariant", CMSRemarkVerifyVariant); } - gclog_or_tty->print(" done] "); + if (!silent) gclog_or_tty->print(" done] "); return true; } @@ -3426,8 +3422,9 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() { void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); - // XXX Fix when compaction is implemented. 
- warning("Shrinking of CMS not yet implemented"); + if (PrintGCDetails && Verbose) { + warning("Shrinking of CMS not yet implemented"); + } return; } @@ -6010,26 +6007,23 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { &cmsDrainMarkingStackClosure, NULL); } - verify_work_stacks_empty(); } + // This is the point where the entire marking should have completed. + verify_work_stacks_empty(); + if (should_unload_classes()) { { TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); - // Follow SystemDictionary roots and unload classes + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); - // Follow CodeCache roots and unload any methods marked for unloading + // Unload nmethods. CodeCache::do_unloading(&_is_alive_closure, purged_class); - cmsDrainMarkingStackClosure.do_void(); - verify_work_stacks_empty(); - - // Update subklass/sibling/implementor links in KlassKlass descendants + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&_is_alive_closure); - // Nothing should have been pushed onto the working stacks. - verify_work_stacks_empty(); } { @@ -6043,11 +6037,10 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { // Need to check if we really scanned the StringTable. if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); - // Now clean up stale oops in StringTable + // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } - verify_work_stacks_empty(); // Restore any preserved marks as a result of mark stack or // work queue overflow restore_preserved_marks_if_any(); // done single-threaded for now @@ -6921,7 +6914,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( size = CompactibleFreeListSpace::adjustObjectSize( p->oop_iterate(_scanningClosure)); } - #ifdef DEBUG + #ifdef ASSERT size_t direct_size = CompactibleFreeListSpace::adjustObjectSize(p->size()); assert(size == direct_size, "Inconsistency in size"); @@ -6933,7 +6926,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( assert(_bitMap->isMarked(addr+size-1), "inconsistent Printezis mark"); } - #endif // DEBUG + #endif // ASSERT } else { // an unitialized object assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); @@ -7075,14 +7068,14 @@ bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { HeapWord* addr = (HeapWord*)p; assert(_span.contains(addr), "we are scanning the CMS generation"); bool is_obj_array = false; - #ifdef DEBUG + #ifdef ASSERT if (!_parallel) { assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); assert(_collector->overflow_list_is_empty(), "overflow list should be empty"); } - #endif // DEBUG + #endif // ASSERT if (_bit_map->isMarked(addr)) { // Obj arrays are precisely marked, non-arrays are not; // so we scan objArrays precisely and non-arrays in their @@ -7102,14 +7095,14 @@ bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { } } } - #ifdef DEBUG + #ifdef ASSERT if (!_parallel) { assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); assert(_collector->overflow_list_is_empty(), "overflow list should be empty"); } - #endif // DEBUG + #endif // ASSERT return is_obj_array; } @@ -8320,7 +8313,7 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) { assert(size == 
CompactibleFreeListSpace::adjustObjectSize(size), "alignment problem"); -#ifdef DEBUG +#ifdef ASSERT if (oop(addr)->klass_or_null() != NULL) { // Ignore mark word because we are running concurrent with mutators assert(oop(addr)->is_oop(true), "live block should be an oop"); diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp index aa84d716670..7040f218eb0 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @@ -990,7 +990,7 @@ class CMSCollector: public CHeapObj { // debugging void verify(); - bool verify_after_remark(); + bool verify_after_remark(bool silent = VerifySilently); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; void verify_overflow_empty() const PRODUCT_RETURN; diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 03086a3bfb6..39e5a2792b5 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -1273,10 +1273,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = g1h->g1_policy(); @@ -1300,10 +1299,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { // Verify the heap w.r.t. the previous marking bitmap. 
if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(overflow)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(overflow)"); } // Clear the marking state because we will be restarting @@ -1323,10 +1321,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UseNextMarking); + Universe::verify(VerifyOption_G1UseNextMarking, + " VerifyDuringGC:(after)"); } assert(!restart_for_overflow(), "sanity"); // Completely reset the marking state since marking completed @@ -1972,10 +1969,9 @@ void ConcurrentMark::cleanup() { if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(before)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(before)"); } G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); @@ -2127,10 +2123,9 @@ void ConcurrentMark::cleanup() { if (VerifyDuringGC) { HandleMark hm; // handle scope - gclog_or_tty->print(" VerifyDuringGC:(after)"); Universe::heap()->prepare_for_verify(); - Universe::verify(/* silent */ false, - /* option */ VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, + " VerifyDuringGC:(after)"); } g1h->verify_region_sets_optional(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp index 9152e7010bf..8fe07693d77 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp @@ -77,7 +77,7 @@ void G1BlockOffsetSharedArray::resize(size_t new_word_size) { assert(delta > 0, "just checking"); if (!_vs.expand_by(delta)) { // Do better than this for Merlin - vm_exit_out_of_memory(delta, "offset table expansion"); + vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); } assert(_vs.high() == high + delta, "invalid expansion"); // Initialization of the contents is left to the diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index fdb602e7012..76ce145ebed 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
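From this point on, every vm_exit_out_of_memory() call gains an error-type argument in addition to the size and message: OOM_MMAP_ERROR for failed reservations/commits and OOM_MALLOC_ERROR for failed C-heap allocations. The sketch below only mirrors that call shape; the enum values echo the patch, but the reporting body is invented.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    enum VMErrorType { OOM_MALLOC_ERROR, OOM_MMAP_ERROR };

    // Illustrative stand-in: report which allocation path failed, then exit.
    static void exit_out_of_memory(size_t size, VMErrorType type, const char* msg) {
      std::fprintf(stderr, "Out of memory (%s) while requesting %zu bytes: %s\n",
                   type == OOM_MMAP_ERROR ? "mmap/commit" : "malloc", size, msg);
      std::exit(1);
    }

    int main() {
      char* p = static_cast<char*>(std::malloc(64));
      if (p == nullptr) {
        exit_out_of_memory(64, OOM_MALLOC_ERROR, "demo buffer");
      }
      std::free(p);
      return 0;
    }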
* * This code is free software; you can redistribute it and/or modify it @@ -1271,9 +1271,8 @@ double G1CollectedHeap::verify(bool guard, const char* msg) { if (guard && total_collections() >= VerifyGCStartAt) { double verify_start = os::elapsedTime(); HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(msg); prepare_for_verify(); - Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking); + Universe::verify(VerifyOption_G1UsePrevMarking, msg); verify_time_ms = (os::elapsedTime() - verify_start) * 1000; } @@ -1304,7 +1303,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, print_heap_before_gc(); - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); HRSPhaseSetter x(HRSPhaseFullGC); verify_region_sets_optional(); @@ -1322,234 +1321,241 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); - TraceCollectorStats tcs(g1mm()->full_collection_counters()); - TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); - - double start = os::elapsedTime(); - g1_policy()->record_full_collection_start(); - - // Note: When we have a more flexible GC logging framework that - // allows us to add optional attributes to a GC log record we - // could consider timing and reporting how long we wait in the - // following two methods. - wait_while_free_regions_coming(); - // If we start the compaction before the CM threads finish - // scanning the root regions we might trip them over as we'll - // be moving objects / updating references. So let's wait until - // they are done. By telling them to abort, they should complete - // early. - _cm->root_regions()->abort(); - _cm->root_regions()->wait_until_scan_finished(); - append_secondary_free_list_if_not_empty_with_lock(); - - gc_prologue(true); - increment_total_collections(true /* full gc */); - increment_old_marking_cycles_started(); - - size_t g1h_prev_used = used(); - assert(used() == recalculate_used(), "Should be equal"); - - verify_before_gc(); - - pre_full_gc_dump(); - - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - // Disable discovery and empty the discovered lists - // for the CM ref processor. - ref_processor_cm()->disable_discovery(); - ref_processor_cm()->abandon_partial_discovery(); - ref_processor_cm()->verify_no_references_recorded(); - - // Abandon current iterations of concurrent marking and concurrent - // refinement, if any are in progress. We have to do this before - // wait_until_scan_finished() below. - concurrent_mark()->abort(); - - // Make sure we'll choose a new allocation region afterwards. - release_mutator_alloc_region(); - abandon_gc_alloc_regions(); - g1_rem_set()->cleanupHRRS(); - - // We should call this after we retire any currently active alloc - // regions so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - - // We may have added regions to the current incremental collection - // set between the last GC or pause and now. We need to clear the - // incremental collection set and then start rebuilding it afresh - // after this full GC. 
- abandon_collection_set(g1_policy()->inc_cset_head()); - g1_policy()->clear_incremental_cset(); - g1_policy()->stop_incremental_cset_building(); - - tear_down_region_sets(false /* free_list_only */); - g1_policy()->set_gcs_are_young(true); - - // See the comments in g1CollectedHeap.hpp and - // G1CollectedHeap::ref_processing_init() about - // how reference processing currently works in G1. - - // Temporarily make discovery by the STW ref processor single threaded (non-MT). - ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); - - // Temporarily clear the STW ref processor's _is_alive_non_header field. - ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); - - ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); - ref_processor_stw()->setup_policy(do_clear_all_soft_refs); - - // Do collection work { - HandleMark hm; // Discard invalid handles created during gc - G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); - } + TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); + TraceCollectorStats tcs(g1mm()->full_collection_counters()); + TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); - assert(free_regions() == 0, "we should not have added any free regions"); - rebuild_region_sets(false /* free_list_only */); + double start = os::elapsedTime(); + g1_policy()->record_full_collection_start(); - // Enqueue any discovered reference objects that have - // not been removed from the discovered lists. - ref_processor_stw()->enqueue_discovered_references(); + // Note: When we have a more flexible GC logging framework that + // allows us to add optional attributes to a GC log record we + // could consider timing and reporting how long we wait in the + // following two methods. + wait_while_free_regions_coming(); + // If we start the compaction before the CM threads finish + // scanning the root regions we might trip them over as we'll + // be moving objects / updating references. So let's wait until + // they are done. By telling them to abort, they should complete + // early. + _cm->root_regions()->abort(); + _cm->root_regions()->wait_until_scan_finished(); + append_secondary_free_list_if_not_empty_with_lock(); - COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + gc_prologue(true); + increment_total_collections(true /* full gc */); + increment_old_marking_cycles_started(); - MemoryService::track_memory_usage(); + assert(used() == recalculate_used(), "Should be equal"); - verify_after_gc(); + verify_before_gc(); - assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); - ref_processor_stw()->verify_no_references_recorded(); + pre_full_gc_dump(); - // Delete metaspaces for unloaded class loaders and clean up loader_data graph - ClassLoaderDataGraph::purge(); + COMPILER2_PRESENT(DerivedPointerTable::clear()); - // Note: since we've just done a full GC, concurrent - // marking is no longer active. Therefore we need not - // re-enable reference discovery for the CM ref processor. - // That will be done at the start of the next marking cycle. - assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); - ref_processor_cm()->verify_no_references_recorded(); + // Disable discovery and empty the discovered lists + // for the CM ref processor. 
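The ReferenceProcessorMTDiscoveryMutator and ReferenceProcessorIsAliveMutator objects being re-indented here are, by name and by the comments around them, scoped "save old value, set new value, restore on destruction" helpers. A generic sketch of that idiom follows; ScopedValueMutator is an invented name, not a HotSpot class.

    // Invented sketch of the save/override/restore idiom used by the
    // ReferenceProcessor*Mutator helpers: the destructor puts the old value
    // back no matter how the scope is left.
    template <typename T>
    class ScopedValueMutator {
      T* _slot;
      T  _saved;
     public:
      ScopedValueMutator(T* slot, T new_value) : _slot(slot), _saved(*slot) {
        *_slot = new_value;
      }
      ~ScopedValueMutator() { *_slot = _saved; }
    };

    int main() {
      bool mt_discovery = true;
      {
        ScopedValueMutator<bool> make_serial(&mt_discovery, false);
        // ... full-GC work runs with single-threaded discovery here ...
      }
      // mt_discovery is restored once the scope ends.
      return mt_discovery ? 0 : 1;
    }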
+ ref_processor_cm()->disable_discovery(); + ref_processor_cm()->abandon_partial_discovery(); + ref_processor_cm()->verify_no_references_recorded(); - reset_gc_time_stamp(); - // Since everything potentially moved, we will clear all remembered - // sets, and clear all cards. Later we will rebuild remebered - // sets. We will also reset the GC time stamps of the regions. - clear_rsets_post_compaction(); - check_gc_time_stamps(); + // Abandon current iterations of concurrent marking and concurrent + // refinement, if any are in progress. We have to do this before + // wait_until_scan_finished() below. + concurrent_mark()->abort(); - // Resize the heap if necessary. - resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); + // Make sure we'll choose a new allocation region afterwards. + release_mutator_alloc_region(); + abandon_gc_alloc_regions(); + g1_rem_set()->cleanupHRRS(); - if (_hr_printer.is_active()) { - // We should do this after we potentially resize the heap so - // that all the COMMIT / UNCOMMIT events are generated before - // the end GC event. + // We should call this after we retire any currently active alloc + // regions so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - print_hrs_post_compaction(); - _hr_printer.end_gc(true /* full */, (size_t) total_collections()); - } + // We may have added regions to the current incremental collection + // set between the last GC or pause and now. We need to clear the + // incremental collection set and then start rebuilding it afresh + // after this full GC. + abandon_collection_set(g1_policy()->inc_cset_head()); + g1_policy()->clear_incremental_cset(); + g1_policy()->stop_incremental_cset_building(); - if (_cg1r->use_cache()) { - _cg1r->clear_and_record_card_counts(); - _cg1r->clear_hot_cache(); - } + tear_down_region_sets(false /* free_list_only */); + g1_policy()->set_gcs_are_young(true); - // Rebuild remembered sets of all regions. - if (G1CollectedHeap::use_parallel_gc_threads()) { - uint n_workers = - AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), - workers()->active_workers(), - Threads::number_of_non_daemon_threads()); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); - workers()->set_active_workers(n_workers); - // Set parallel threads in the heap (_n_par_threads) only - // before a parallel phase and always reset it to 0 after - // the phase so that the number of parallel threads does - // no get carried forward to a serial phase where there - // may be code that is "possibly_parallel". - set_par_threads(n_workers); + // See the comments in g1CollectedHeap.hpp and + // G1CollectedHeap::ref_processing_init() about + // how reference processing currently works in G1. 
- ParRebuildRSTask rebuild_rs_task(this); - assert(check_heap_region_claim_values( - HeapRegion::InitialClaimValue), "sanity check"); - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "Unless dynamic should use total workers"); - // Use the most recent number of active workers - assert(workers()->active_workers() > 0, - "Active workers not properly set"); - set_par_threads(workers()->active_workers()); - workers()->run_task(&rebuild_rs_task); - set_par_threads(0); - assert(check_heap_region_claim_values( - HeapRegion::RebuildRSClaimValue), "sanity check"); - reset_heap_region_claim_values(); - } else { - RebuildRSOutOfRegionClosure rebuild_rs(this); - heap_region_iterate(&rebuild_rs); - } + // Temporarily make discovery by the STW ref processor single threaded (non-MT). + ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); - if (G1Log::fine()) { - print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); - } + // Temporarily clear the STW ref processor's _is_alive_non_header field. + ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); - if (true) { // FIXME - MetaspaceGC::compute_new_size(); - } + ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); + ref_processor_stw()->setup_policy(do_clear_all_soft_refs); - // Start a new incremental collection set for the next pause - assert(g1_policy()->collection_set() == NULL, "must be"); - g1_policy()->start_incremental_cset_building(); + // Do collection work + { + HandleMark hm; // Discard invalid handles created during gc + G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); + } - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the next - // evacuation pause. - clear_cset_fast_test(); + assert(free_regions() == 0, "we should not have added any free regions"); + rebuild_region_sets(false /* free_list_only */); - init_mutator_alloc_region(); + // Enqueue any discovered reference objects that have + // not been removed from the discovered lists. + ref_processor_stw()->enqueue_discovered_references(); - double end = os::elapsedTime(); - g1_policy()->record_full_collection_end(); + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + + MemoryService::track_memory_usage(); + + verify_after_gc(); + + assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); + ref_processor_stw()->verify_no_references_recorded(); + + // Delete metaspaces for unloaded class loaders and clean up loader_data graph + ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); + + // Note: since we've just done a full GC, concurrent + // marking is no longer active. Therefore we need not + // re-enable reference discovery for the CM ref processor. + // That will be done at the start of the next marking cycle. + assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); + ref_processor_cm()->verify_no_references_recorded(); + + reset_gc_time_stamp(); + // Since everything potentially moved, we will clear all remembered + // sets, and clear all cards. Later we will rebuild remebered + // sets. We will also reset the GC time stamps of the regions. + clear_rsets_post_compaction(); + check_gc_time_stamps(); + + // Resize the heap if necessary. + resize_if_necessary_after_full_collection(explicit_gc ? 
0 : word_size); + + if (_hr_printer.is_active()) { + // We should do this after we potentially resize the heap so + // that all the COMMIT / UNCOMMIT events are generated before + // the end GC event. + + print_hrs_post_compaction(); + _hr_printer.end_gc(true /* full */, (size_t) total_collections()); + } + + if (_cg1r->use_cache()) { + _cg1r->clear_and_record_card_counts(); + _cg1r->clear_hot_cache(); + } + + // Rebuild remembered sets of all regions. + if (G1CollectedHeap::use_parallel_gc_threads()) { + uint n_workers = + AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), + workers()->active_workers(), + Threads::number_of_non_daemon_threads()); + assert(UseDynamicNumberOfGCThreads || + n_workers == workers()->total_workers(), + "If not dynamic should be using all the workers"); + workers()->set_active_workers(n_workers); + // Set parallel threads in the heap (_n_par_threads) only + // before a parallel phase and always reset it to 0 after + // the phase so that the number of parallel threads does + // no get carried forward to a serial phase where there + // may be code that is "possibly_parallel". + set_par_threads(n_workers); + + ParRebuildRSTask rebuild_rs_task(this); + assert(check_heap_region_claim_values( + HeapRegion::InitialClaimValue), "sanity check"); + assert(UseDynamicNumberOfGCThreads || + workers()->active_workers() == workers()->total_workers(), + "Unless dynamic should use total workers"); + // Use the most recent number of active workers + assert(workers()->active_workers() > 0, + "Active workers not properly set"); + set_par_threads(workers()->active_workers()); + workers()->run_task(&rebuild_rs_task); + set_par_threads(0); + assert(check_heap_region_claim_values( + HeapRegion::RebuildRSClaimValue), "sanity check"); + reset_heap_region_claim_values(); + } else { + RebuildRSOutOfRegionClosure rebuild_rs(this); + heap_region_iterate(&rebuild_rs); + } + + if (true) { // FIXME + MetaspaceGC::compute_new_size(); + } #ifdef TRACESPINNING - ParallelTaskTerminator::print_termination_counts(); + ParallelTaskTerminator::print_termination_counts(); #endif - gc_epilogue(true); + // Discard all rset updates + JavaThread::dirty_card_queue_set().abandon_logs(); + assert(!G1DeferredRSUpdate + || (G1DeferredRSUpdate && + (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); - // Discard all rset updates - JavaThread::dirty_card_queue_set().abandon_logs(); - assert(!G1DeferredRSUpdate - || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); + _young_list->reset_sampled_info(); + // At this point there should be no regions in the + // entire heap tagged as young. + assert(check_young_list_empty(true /* check_heap */), + "young list should be empty at this point"); - _young_list->reset_sampled_info(); - // At this point there should be no regions in the - // entire heap tagged as young. - assert( check_young_list_empty(true /* check_heap */), - "young list should be empty at this point"); + // Update the number of full collections that have been completed. + increment_old_marking_cycles_completed(false /* concurrent */); - // Update the number of full collections that have been completed. 
- increment_old_marking_cycles_completed(false /* concurrent */); + _hrs.verify_optional(); + verify_region_sets_optional(); - _hrs.verify_optional(); - verify_region_sets_optional(); + // Start a new incremental collection set for the next pause + assert(g1_policy()->collection_set() == NULL, "must be"); + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + + init_mutator_alloc_region(); + + double end = os::elapsedTime(); + g1_policy()->record_full_collection_end(); + + if (G1Log::fine()) { + g1_policy()->print_heap_transition(); + } + + // We must call G1MonitoringSupport::update_sizes() in the same scoping level + // as an active TraceMemoryManagerStats object (i.e. before the destructor for the + // TraceMemoryManagerStats is called) so that the G1 memory pools are updated + // before any GC notifications are raised. + g1mm()->update_sizes(); + + gc_epilogue(true); + } + + if (G1Log::finer()) { + g1_policy()->print_detailed_heap_transition(); + } print_heap_after_gc(); - // We must call G1MonitoringSupport::update_sizes() in the same scoping level - // as an active TraceMemoryManagerStats object (i.e. before the destructor for the - // TraceMemoryManagerStats is called) so that the G1 memory pools are updated - // before any GC notifications are raised. - g1mm()->update_sizes(); + post_full_gc_dump(); } - post_full_gc_dump(); - return true; } @@ -1825,7 +1831,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes) { if (G1ExitOnExpansionFailure && _g1_storage.uncommitted_size() >= aligned_expand_bytes) { // We had head room... - vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion"); + vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); } } return successful; @@ -1949,13 +1955,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); assert(n_rem_sets > 0, "Invariant."); - HeapRegionRemSetIterator** iter_arr = - NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); - for (int i = 0; i < n_queues; i++) { - iter_arr[i] = new HeapRegionRemSetIterator(); - } - _rem_set_iterator = iter_arr; - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); @@ -3608,7 +3607,7 @@ G1CollectedHeap::setup_surviving_young_words() { uint array_length = g1_policy()->young_cset_region_length(); _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); if (_surviving_young_words == NULL) { - vm_exit_out_of_memory(sizeof(size_t) * array_length, + vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR, "Not enough space for young surv words summary."); } memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t)); @@ -3838,7 +3837,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { // The elapsed time induced by the start time below deliberately elides // the possible verification above. 
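Most of the large re-indented block above moves the full-GC work, through gc_epilogue(true) and g1mm()->update_sizes(), inside the scope of the TraceTime / TraceCollectorStats / TraceMemoryManagerStats objects, for the reason the patch's own comment states: update_sizes() must run before the TraceMemoryManagerStats destructor fires. A small RAII sketch of that ordering concern follows; ScopedPhaseTimer and its output are invented.

    #include <chrono>
    #include <cstdio>

    // Invented RAII timer standing in for TraceTime/TraceMemoryManagerStats:
    // its destructor reports, so any bookkeeping the report should observe
    // must happen inside the same scope.
    class ScopedPhaseTimer {
      const char* _name;
      std::chrono::steady_clock::time_point _start;
     public:
      explicit ScopedPhaseTimer(const char* name)
          : _name(name), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        using namespace std::chrono;
        auto ms = duration_cast<milliseconds>(steady_clock::now() - _start).count();
        std::printf("[%s, %lld ms]\n", _name, static_cast<long long>(ms));
      }
    };

    int main() {
      {
        ScopedPhaseTimer t("Full GC");
        // ... collection work ...
        // update_sizes()-style bookkeeping belongs here, before ~ScopedPhaseTimer.
      }
      // Reporting that should NOT be covered by the timer goes out here.
      return 0;
    }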
double sample_start_time_sec = os::elapsedTime(); - size_t start_used_bytes = used(); #if YOUNG_LIST_VERBOSE gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); @@ -3846,8 +3844,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_collection_pause_start(sample_start_time_sec, - start_used_bytes); + g1_policy()->record_collection_pause_start(sample_start_time_sec); double scan_wait_start = os::elapsedTime(); // We have to wait until the CM threads finish scanning the @@ -4393,7 +4390,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num) PADDING_ELEM_NUM; _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); if (_surviving_young_words_base == NULL) - vm_exit_out_of_memory(array_length * sizeof(size_t), + vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR, "Not enough space for young surv histo."); _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t)); @@ -5075,10 +5072,9 @@ g1_process_strong_roots(bool is_scavenging, } void -G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure) { +G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); - SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); + SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); } // Weak Reference Processing support diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 4fbf0ff367a..32f5a46b471 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -786,9 +786,6 @@ protected: // concurrently after the collection. DirtyCardQueueSet _dirty_card_queue_set; - // The Heap Region Rem Set Iterator. - HeapRegionRemSetIterator** _rem_set_iterator; - // The closure used to refine a single card. RefineCardTableEntryClosure* _refine_cte_cl; @@ -827,8 +824,7 @@ protected: // Apply "blk" to all the weak roots of the system. These include // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. - void g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure); + void g1_process_weak_roots(OopClosure* root_closure); // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is @@ -1114,15 +1110,6 @@ public: G1RemSet* g1_rem_set() const { return _g1_rem_set; } ModRefBarrierSet* mr_bs() const { return _mr_bs; } - // The rem set iterator. 
- HeapRegionRemSetIterator* rem_set_iterator(int i) { - return _rem_set_iterator[i]; - } - - HeapRegionRemSetIterator* rem_set_iterator() { - return _rem_set_iterator[0]; - } - unsigned get_gc_time_stamp() { return _gc_time_stamp; } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp index 893862aa84c..a645cc33d6c 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -406,7 +406,6 @@ void G1CollectorPolicy::init() { } _free_regions_at_end_of_collection = _g1->free_regions(); update_young_list_target_length(); - _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes; // We may immediately start allocating regions and placing them on the // collection set list. Initialize the per-collection set info @@ -746,6 +745,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head, void G1CollectorPolicy::record_full_collection_start() { _full_collection_start_sec = os::elapsedTime(); + record_heap_size_info_at_start(); // Release the future to-space so that it is available for compaction into. _g1->set_full_collection(); } @@ -788,8 +788,7 @@ void G1CollectorPolicy::record_stop_world_start() { _stop_world_start = os::elapsedTime(); } -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, - size_t start_used) { +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { // We only need to do this here as the policy will only be applied // to the GC we're about to start. so, no point is calculating this // every time we calculate / recalculate the target young length. 
@@ -803,19 +802,14 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, _trace_gen0_time_data.record_start_collection(s_w_t_ms); _stop_world_start = 0.0; + record_heap_size_info_at_start(); + phase_times()->record_cur_collection_start_sec(start_time_sec); - _cur_collection_pause_used_at_start_bytes = start_used; - _cur_collection_pause_used_regions_at_start = _g1->used_regions(); _pending_cards = _g1->pending_card_num(); _collection_set_bytes_used_before = 0; _bytes_copied_during_gc = 0; - YoungList* young_list = _g1->young_list(); - _eden_bytes_before_gc = young_list->eden_used_bytes(); - _survivor_bytes_before_gc = young_list->survivor_used_bytes(); - _capacity_before_gc = _g1->capacity(); - _last_gc_was_young = false; // do that for any other surv rate groups @@ -1153,6 +1147,21 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) { byte_size_in_proper_unit((double)(bytes)), \ proper_unit_for_byte_size((bytes)) +void G1CollectorPolicy::record_heap_size_info_at_start() { + YoungList* young_list = _g1->young_list(); + _eden_bytes_before_gc = young_list->eden_used_bytes(); + _survivor_bytes_before_gc = young_list->survivor_used_bytes(); + _capacity_before_gc = _g1->capacity(); + + _cur_collection_pause_used_at_start_bytes = _g1->used(); + _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + + size_t eden_capacity_before_gc = + (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc; + + _prev_eden_capacity = eden_capacity_before_gc; +} + void G1CollectorPolicy::print_heap_transition() { _g1->print_size_transition(gclog_or_tty, _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity()); @@ -1183,8 +1192,6 @@ void G1CollectorPolicy::print_detailed_heap_transition() { EXT_SIZE_PARAMS(_capacity_before_gc), EXT_SIZE_PARAMS(used), EXT_SIZE_PARAMS(capacity)); - - _prev_eden_capacity = eden_capacity; } void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp index 0c04167c8a4..08867850394 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -671,34 +671,36 @@ public: bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); - // Update the heuristic info to record a collection pause of the given - // start time, where the given number of bytes were used at the start. - // This may involve changing the desired size of a collection set. + // Record the start and end of an evacuation pause. + void record_collection_pause_start(double start_time_sec); + void record_collection_pause_end(double pause_time_ms); - void record_stop_world_start(); - - void record_collection_pause_start(double start_time_sec, size_t start_used); + // Record the start and end of a full collection. + void record_full_collection_start(); + void record_full_collection_end(); // Must currently be called while the world is stopped. 
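These policy hunks pull the "heap sizes at the start of the collection" bookkeeping out of record_collection_pause_start() into a single record_heap_size_info_at_start(), called from both the evacuation-pause and the full-collection start paths. A compact sketch of that consolidation follows; the struct and field names are illustrative, not the G1CollectorPolicy members.

    #include <cstddef>

    // Invented stand-ins for the quantities G1 snapshots before a collection.
    struct HeapSnapshot {
      size_t eden_bytes;
      size_t survivor_bytes;
      size_t capacity_bytes;
      size_t used_bytes;
    };

    class PolicySketch {
      HeapSnapshot _before_gc;
      // One helper records everything later needed by the heap-transition
      // printing, so both pause kinds stay in sync.
      void record_heap_size_info_at_start(const HeapSnapshot& now) {
        _before_gc = now;
      }
     public:
      void record_collection_pause_start(const HeapSnapshot& now) {
        record_heap_size_info_at_start(now);
        // ... pause-specific bookkeeping ...
      }
      void record_full_collection_start(const HeapSnapshot& now) {
        record_heap_size_info_at_start(now);
        // ... full-GC-specific bookkeeping ...
      }
      size_t used_before_gc() const { return _before_gc.used_bytes; }
    };

    int main() {
      PolicySketch p;
      p.record_full_collection_start(HeapSnapshot{1u << 20, 1u << 18, 1u << 26, 1u << 24});
      return p.used_before_gc() == (1u << 24) ? 0 : 1;
    }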
- void record_concurrent_mark_init_end(double - mark_init_elapsed_time_ms); + void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms); + // Record start and end of remark. void record_concurrent_mark_remark_start(); void record_concurrent_mark_remark_end(); + // Record start, end, and completion of cleanup. void record_concurrent_mark_cleanup_start(); void record_concurrent_mark_cleanup_end(int no_of_gc_threads); void record_concurrent_mark_cleanup_completed(); - void record_concurrent_pause(); + // Records the information about the heap size for reporting in + // print_detailed_heap_transition + void record_heap_size_info_at_start(); - void record_collection_pause_end(double pause_time); + // Print heap sizing transition (with less and more detail). void print_heap_transition(); void print_detailed_heap_transition(); - // Record the fact that a full collection occurred. - void record_full_collection_start(); - void record_full_collection_end(); + void record_stop_world_start(); + void record_concurrent_pause(); // Record how much space we copied during a GC. This is typically // called when a GC alloc region is being retired. diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp index b987f7df4e7..d87a6cca135 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @@ -144,33 +144,28 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, &GenMarkSweep::follow_stack_closure, NULL); - // Follow system dictionary roots and unload classes + + // This is the point where the entire marking should have completed. + assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - // Follow code cache roots (has to be done after system dictionary, - // assumes all live klasses are marked) + // Unload nmethods. CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class); - GenMarkSweep::follow_stack(); - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&GenMarkSweep::is_alive); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&GenMarkSweep::is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - assert(GenMarkSweep::_marking_stack.is_empty(), - "stack should be empty by now"); - if (VerifyDuringGC) { HandleMark hm; // handle scope COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); - gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); Universe::heap()->prepare_for_verify(); // Note: we can verify only the heap here. When an object is // marked, the previous value of the mark word (including @@ -182,11 +177,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, // fail. At the end of the GC, the orginal mark word values // (including hash values) are restored to the appropriate // objects. 
- Universe::heap()->verify(/* silent */ false, - /* option */ VerifyOption_G1UseMarkWord); - - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - gclog_or_tty->print_cr("]"); + if (!VerifySilently) { + gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying "); + } + Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord); + if (!VerifySilently) { + gclog_or_tty->print_cr("]"); + } } } @@ -308,17 +305,16 @@ void G1MarkSweep::mark_sweep_phase3() { sh->process_strong_roots(true, // activate StrongRootsScope false, // not scavenging. SharedHeap::SO_AllClasses, - &GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure, NULL, // do not touch code cache here &GenMarkSweep::adjust_klass_closure); assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity"); - g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure); + g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure, - &GenMarkSweep::adjust_pointer_closure); + g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure); GenMarkSweep::adjust_marks(); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp index 04af52e9478..e7151071e9f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -169,14 +169,13 @@ public: // _try_claimed || r->claim_iter() // is true: either we're supposed to work on claimed-but-not-complete // regions, or we successfully claimed the region. - HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i); - hrrs->init_iterator(iter); + HeapRegionRemSetIterator iter(hrrs); size_t card_index; // We claim cards in block so as to recude the contention. The block size is determined by // the G1RSetScanBlockSize parameter. size_t jump_to_card = hrrs->iter_claimed_next(_block_size); - for (size_t current_card = 0; iter->has_next(card_index); current_card++) { + for (size_t current_card = 0; iter.has_next(card_index); current_card++) { if (current_card >= jump_to_card + _block_size) { jump_to_card = hrrs->iter_claimed_next(_block_size); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp index 0468e9ac312..eee6b447087 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp @@ -53,14 +53,14 @@ protected: NumSeqTasks = 1 }; - CardTableModRefBS* _ct_bs; - SubTasksDone* _seq_task; - G1CollectorPolicy* _g1p; + CardTableModRefBS* _ct_bs; + SubTasksDone* _seq_task; + G1CollectorPolicy* _g1p; - ConcurrentG1Refine* _cg1r; + ConcurrentG1Refine* _cg1r; - size_t* _cards_scanned; - size_t _total_cards_scanned; + size_t* _cards_scanned; + size_t _total_cards_scanned; // Used for caching the closure that is responsible for scanning // references into the collection set. 
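In the g1RemSet.cpp hunk above, each worker now builds a HeapRegionRemSetIterator on its own stack but still claims cards in blocks via hrrs->iter_claimed_next(_block_size) to reduce contention. A loose sketch of that block-claiming idea, using a shared atomic cursor, follows; the names and loop are invented and single-threaded for brevity.

    #include <atomic>
    #include <cstddef>

    // Invented sketch of block-wise claiming: a shared atomic cursor hands out
    // ranges of cards so parallel workers touch shared state once per block
    // (G1RSetScanBlockSize in the real code) rather than once per card.
    class BlockClaimer {
      std::atomic<size_t> _next{0};
     public:
      size_t claim_next_block(size_t block_size) { return _next.fetch_add(block_size); }
    };

    int main() {
      BlockClaimer claimer;
      const size_t block_size = 64;
      const size_t n_cards = 1000;
      size_t jump_to = claimer.claim_next_block(block_size);
      size_t scanned = 0;
      for (size_t card = 0; card < n_cards; ++card) {
        if (card >= jump_to + block_size) {
          jump_to = claimer.claim_next_block(block_size);  // claim the next block
        }
        if (card >= jump_to) {
          ++scanned;  // this worker scans cards inside its claimed block
        }
      }
      return scanned > 0 ? 0 : 1;
    }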
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp index 56e94051c3a..1e005a26ecd 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @@ -285,7 +285,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries]; if (_fine_grain_regions == NULL) { - vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, + vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR, "Failed to allocate _fine_grain_entries."); } @@ -877,14 +877,9 @@ bool HeapRegionRemSet::iter_is_complete() { return _iter_state == Complete; } -void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { - iter->initialize(this); -} - #ifndef PRODUCT void HeapRegionRemSet::print() const { - HeapRegionRemSetIterator iter; - init_iterator(&iter); + HeapRegionRemSetIterator iter(this); size_t card_index; while (iter.has_next(card_index)) { HeapWord* card_start = @@ -928,35 +923,23 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, //-------------------- Iteration -------------------- -HeapRegionRemSetIterator:: -HeapRegionRemSetIterator() : - _hrrs(NULL), +HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : + _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), - _bosa(NULL), - _sparse_iter() { } - -void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { - _hrrs = hrrs; - _coarse_map = &_hrrs->_other_regions._coarse_map; - _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; - _bosa = _hrrs->bosa(); - - _is = Sparse; + _coarse_map(&hrrs->_other_regions._coarse_map), + _fine_grain_regions(hrrs->_other_regions._fine_grain_regions), + _bosa(hrrs->bosa()), + _is(Sparse), // Set these values so that we increment to the first region. - _coarse_cur_region_index = -1; - _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1); - - _cur_region_cur_card = 0; - - _fine_array_index = -1; - _fine_cur_prt = NULL; - - _n_yielded_coarse = 0; - _n_yielded_fine = 0; - _n_yielded_sparse = 0; - - _sparse_iter.init(&hrrs->_other_regions._sparse_table); -} + _coarse_cur_region_index(-1), + _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1), + _cur_region_cur_card(0), + _fine_array_index(-1), + _fine_cur_prt(NULL), + _n_yielded_coarse(0), + _n_yielded_fine(0), + _n_yielded_sparse(0), + _sparse_iter(&hrrs->_other_regions._sparse_table) {} bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { if (_hrrs->_other_regions._n_coarse_entries == 0) return false; @@ -1209,8 +1192,7 @@ void HeapRegionRemSet::test() { hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom()); // Now, does iteration yield these three? - HeapRegionRemSetIterator iter; - hrrs->init_iterator(&iter); + HeapRegionRemSetIterator iter(hrrs); size_t sum = 0; size_t card_index; while (iter.has_next(card_index)) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp index 1b1d42d7a35..2e165074e10 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @@ -281,9 +281,6 @@ public: return (_iter_state == Unclaimed) && (_iter_claimed == 0); } - // Initialize the given iterator to iterate over this rem set. 
- void init_iterator(HeapRegionRemSetIterator* iter) const; - // The actual # of bytes this hr_remset takes up. size_t mem_size() { return _other_regions.mem_size() @@ -345,9 +342,9 @@ public: #endif }; -class HeapRegionRemSetIterator : public CHeapObj { +class HeapRegionRemSetIterator : public StackObj { - // The region over which we're iterating. + // The region RSet over which we're iterating. const HeapRegionRemSet* _hrrs; // Local caching of HRRS fields. @@ -362,8 +359,10 @@ class HeapRegionRemSetIterator : public CHeapObj { size_t _n_yielded_coarse; size_t _n_yielded_sparse; - // If true we're iterating over the coarse table; if false the fine - // table. + // Indicates what granularity of table that we're currently iterating over. + // We start iterating over the sparse table, progress to the fine grain + // table, and then finish with the coarse table. + // See HeapRegionRemSetIterator::has_next(). enum IterState { Sparse, Fine, @@ -403,9 +402,7 @@ class HeapRegionRemSetIterator : public CHeapObj { public: // We require an iterator to be initialized before use, so the // constructor does little. - HeapRegionRemSetIterator(); - - void initialize(const HeapRegionRemSet* hrrs); + HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs); // If there remains one or more cards to be yielded, returns true and // sets "card_index" to one of those cards (which is then considered diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp index 0daa63512a3..a08e8a9801e 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp @@ -35,10 +35,6 @@ #define UNROLL_CARD_LOOPS 1 -void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) { - sprt_iter->init(this); -} - void SparsePRTEntry::init(RegionIdx_t region_ind) { _region_ind = region_ind; _next_index = NullEntry; diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp index ab0ab1f0205..6e821f73bbc 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp @@ -192,18 +192,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC { size_t compute_card_ind(CardIdx_t ci); public: - RSHashTableIter() : - _tbl_ind(RSHashTable::NullEntry), + RSHashTableIter(RSHashTable* rsht) : + _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0. _bl_ind(RSHashTable::NullEntry), _card_ind((SparsePRTEntry::cards_num() - 1)), - _rsht(NULL) {} - - void init(RSHashTable* rsht) { - _rsht = rsht; - _tbl_ind = -1; // So that first increment gets to 0. 
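The remembered-set iterator hunks convert HeapRegionRemSetIterator (and, below, RSHashTableIter / SparsePRTIter) from heap-allocated, default-constructed objects that required a separate init_iterator()/init() call into StackObj instances fully initialized by a constructor that takes the set they iterate. A generic sketch of that before/after follows; CardSet and CardSetIterator are illustrative names only.

    #include <cstddef>
    #include <vector>

    // Invented container standing in for a remembered set of card indices.
    struct CardSet {
      std::vector<size_t> cards;
    };

    // After the refactor: a stack-allocated iterator bound to its set at
    // construction time, so it can never be used half-initialized.
    class CardSetIterator {
      const CardSet* _set;
      size_t _pos;
     public:
      explicit CardSetIterator(const CardSet* set) : _set(set), _pos(0) {}
      bool has_next(size_t& card_index) {
        if (_pos >= _set->cards.size()) return false;
        card_index = _set->cards[_pos++];
        return true;
      }
    };

    int main() {
      CardSet set;
      set.cards = {3, 5, 8};
      CardSetIterator iter(&set);   // no separate init() step
      size_t card, sum = 0;
      while (iter.has_next(card)) sum += card;
      return sum == 16 ? 0 : 1;
    }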
- _bl_ind = RSHashTable::NullEntry; - _card_ind = (SparsePRTEntry::cards_num() - 1); - } + _rsht(rsht) {} bool has_next(size_t& card_index); }; @@ -284,8 +277,6 @@ public: static void cleanup_all(); RSHashTable* cur() const { return _cur; } - void init_iterator(SparsePRTIter* sprt_iter); - static void add_to_expanded_list(SparsePRT* sprt); static SparsePRT* get_from_expanded_list(); @@ -321,9 +312,9 @@ public: class SparsePRTIter: public RSHashTableIter { public: - void init(const SparsePRT* sprt) { - RSHashTableIter::init(sprt->cur()); - } + SparsePRTIter(const SparsePRT* sprt) : + RSHashTableIter(sprt->cur()) {} + bool has_next(size_t& card_index) { return RSHashTableIter::has_next(card_index); } diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp index 83d3edffc34..b2134f10e92 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp @@ -567,7 +567,7 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region, MemRegion(new_start_aligned, new_end_for_commit); if (!os::commit_memory((char*)new_committed.start(), new_committed.byte_size())) { - vm_exit_out_of_memory(new_committed.byte_size(), + vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, "card table expansion"); } } diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp index 2ea89a59eb8..234d5981a24 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp @@ -43,7 +43,7 @@ GCTaskThread::GCTaskThread(GCTaskManager* manager, _time_stamp_index(0) { if (!os::create_thread(this, os::pgc_thread)) - vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources."); + vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GC thread. 
Out of system resources."); if (PrintGCTaskTimeStamps) { _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp index 06b3daa7da1..a7713b4dcfa 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp @@ -99,7 +99,7 @@ void ObjectStartArray::set_covered_region(MemRegion mr) { // Expand size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes; if (!_virtual_space.expand_by(expand_by)) { - vm_exit_out_of_memory(expand_by, "object start array expansion"); + vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion"); } // Clear *only* the newly allocated region memset(_blocks_region.end(), clean_block, expand_by); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp index 63cd3760282..adbaee43fc3 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp @@ -138,8 +138,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -177,7 +176,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); @@ -238,6 +237,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); Threads::gc_epilogue(); @@ -340,8 +340,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -518,23 +517,23 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots + // Unload nmethods. 
CodeCache::do_unloading(is_alive_closure(), purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses - Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); + // Prune dead klasses from subklass/sibling/implementor lists. + Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(_marking_stack.is_empty(), "stack should be empty by now"); } @@ -583,28 +582,27 @@ void PSMarkSweep::mark_sweep_phase3() { ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. - Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); - //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure()); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
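The same reordering appears in CMS, G1, and both ParallelScavenge collectors: assert that marking has fully completed, then unload classes through the SystemDictionary, unload nmethods from the code cache, prune dead klass links, and finally scrub the string and symbol tables. The sketch below lays out that sequence with invented function names so the ordering is easy to see; it is not the HotSpot API.

    #include <cassert>

    // Invented steps mirroring the post-marking cleanup order.
    static bool marking_stacks_empty()          { return true; }
    static bool unload_classes()                { return false; } // "purged_class"
    static void unload_nmethods(bool /*purged*/) {}
    static void clean_weak_klass_links()        {}
    static void unlink_dead_interned_strings()  {}
    static void unlink_unreferenced_symbols()   {}

    static void post_marking_cleanup() {
      // This is the point where the entire marking should have completed.
      assert(marking_stacks_empty() && "Marking should have completed");

      bool purged_class = unload_classes();   // SystemDictionary::do_unloading
      unload_nmethods(purged_class);          // CodeCache::do_unloading
      clean_weak_klass_links();               // Klass::clean_weak_klass_links
      unlink_dead_interned_strings();         // StringTable::unlink
      unlink_unreferenced_symbols();          // SymbolTable::unlink
    }

    int main() {
      post_marking_cleanup();
      return 0;
    }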
// Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); - PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); adjust_marks(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp index fcbc103dc3a..7d96afbb4df 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp @@ -44,7 +44,6 @@ class PSMarkSweep : public MarkSweep { static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; } static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; } diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp index 487a4e56553..b03107a97cd 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @@ -787,12 +787,11 @@ bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap( void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); +PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure; PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure; -void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); } @@ -805,7 +804,7 @@ void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) { klass->oops_do(_mark_and_push_closure); } void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure); + klass->oops_do(&PSParallelCompact::_adjust_pointer_closure); } void PSParallelCompact::post_initialize() { @@ 
-892,7 +891,7 @@ public: _heap_used = heap->used(); _young_gen_used = heap->young_gen()->used_in_bytes(); _old_gen_used = heap->old_gen()->used_in_bytes(); - _metadata_used = MetaspaceAux::used_in_bytes(); + _metadata_used = MetaspaceAux::allocated_used_bytes(); }; size_t heap_used() const { return _heap_used; } @@ -967,8 +966,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values) if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } // Verify object start arrays @@ -1027,6 +1025,7 @@ void PSParallelCompact::post_compact() // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); @@ -2168,8 +2167,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } // Re-verify object start arrays @@ -2356,22 +2354,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm, } TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty); + + // This is the point where the entire marking should have completed. + assert(cm->marking_stacks_empty(), "Marking should have completed"); + // Follow system dictionary roots and unload classes. bool purged_class = SystemDictionary::do_unloading(is_alive_closure()); - // Follow code cache roots. + // Unload nmethods. CodeCache::do_unloading(is_alive_closure(), purged_class); - cm->follow_marking_stacks(); // Flush marking stack. - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(is_alive_closure()); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(is_alive_closure()); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(cm->marking_stacks_empty(), "marking stacks should be empty"); } void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) { @@ -2398,7 +2398,7 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm, void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm, ClassLoaderData* cld) { - cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(), + cld->oops_do(PSParallelCompact::adjust_pointer_closure(), PSParallelCompact::adjust_klass_closure(), true); } @@ -2419,32 +2419,31 @@ void PSParallelCompact::adjust_roots() { ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. 
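
The reordered class-unloading code above asserts that marking has fully completed before anything is unloaded, then walks the weak structures in dependency order. Below is a hedged, self-contained sketch of that ordering; the functions are illustrative placeholders, not the HotSpot entry points (the real work is done by SystemDictionary, CodeCache, Klass, StringTable and SymbolTable).

#include <cassert>

static bool marking_stack_empty()              { return true;  }
static bool unload_dead_classes()              { return false; } // returns "purged anything?"
static void unload_dead_nmethods(bool purged)  { (void)purged;  }
static void prune_dead_klass_links()           {}
static void unlink_dead_interned_strings()     {}
static void unlink_unreferenced_symbols()      {}

static void post_marking_cleanup() {
    // All liveness information must be final before any unloading starts.
    assert(marking_stack_empty() && "marking should have completed");

    bool purged_class = unload_dead_classes();  // may make nmethods unreachable,
    unload_dead_nmethods(purged_class);         // so nmethod unloading comes second
    prune_dead_klass_links();                   // subklass/sibling/implementor lists
    unlink_dead_interned_strings();             // string table entries for dead oops
    unlink_unreferenced_symbols();              // finally, the symbol table
}

int main() { post_marking_cleanup(); return 0; }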
- Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); // Roots were visited so references into the young gen in roots // may have been scanned. Process them also. // Should the reference processor have a span that excludes // young gen objects? 
- PSScavenge::reference_processor()->weak_oops_do( - adjust_root_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); } void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp index 68b54fb9b13..6ced655c21a 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp @@ -799,16 +799,6 @@ class PSParallelCompact : AllStatic { virtual void do_oop(narrowOop* p); }; - // Current unused - class FollowRootClosure: public OopsInGenClosure { - private: - ParCompactionManager* _compaction_manager; - public: - FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); - }; - class FollowStackClosure: public VoidClosure { private: ParCompactionManager* _compaction_manager; @@ -818,10 +808,7 @@ class PSParallelCompact : AllStatic { }; class AdjustPointerClosure: public OopClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) { } virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); // do not walk from thread stacks to the code cache on this phase @@ -838,7 +825,6 @@ class PSParallelCompact : AllStatic { friend class AdjustPointerClosure; friend class AdjustKlassClosure; friend class FollowKlassClosure; - friend class FollowRootClosure; friend class InstanceClassLoaderKlass; friend class RefProcTaskProxy; @@ -853,7 +839,6 @@ class PSParallelCompact : AllStatic { static IsAliveClosure _is_alive_closure; static SpaceInfo _space_info[last_space_id]; static bool _print_phases; - static AdjustPointerClosure _adjust_root_pointer_closure; static AdjustPointerClosure _adjust_pointer_closure; static AdjustKlassClosure _adjust_klass_closure; @@ -889,9 +874,6 @@ class PSParallelCompact : AllStatic { static void marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction); - template static inline void adjust_pointer(T* p, bool is_root); - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - template static inline void follow_root(ParCompactionManager* cm, T* p); @@ -1046,7 +1028,6 @@ class PSParallelCompact : AllStatic { // Closure accessors static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } @@ -1067,6 +1048,7 @@ class PSParallelCompact : AllStatic { // Check mark and maybe push on marking stack template static inline void mark_and_push(ParCompactionManager* cm, T* p); + template static inline void adjust_pointer(T* p); static void follow_klass(ParCompactionManager* cm, Klass* klass); static void adjust_klass(ParCompactionManager* cm, Klass* klass); @@ -1151,9 +1133,6 @@ class PSParallelCompact : AllStatic { static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } static ParallelCompactData& summary_data() { return _summary_data; } - static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, 
false); } - // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } @@ -1230,7 +1209,7 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) { } template -inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { +inline void PSParallelCompact::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp index 078bd62aff7..4bc7870b98a 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp @@ -314,8 +314,7 @@ bool PSScavenge::invoke_no_policy() { if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } { @@ -638,8 +637,7 @@ bool PSScavenge::invoke_no_policy() { if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } heap->print_heap_after_gc(); diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp index 6ea4097daa9..5e52aa1eb84 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp @@ -81,7 +81,7 @@ void MarkSweep::follow_class_loader(ClassLoaderData* cld) { } void MarkSweep::adjust_class_loader(ClassLoaderData* cld) { - cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true); + cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true); } @@ -121,11 +121,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) { } } -MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true); -MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false); +MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure; -void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void MarkSweep::adjust_marks() { assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(), diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp index 1de7561ce55..ec724afa5ec 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp @@ -80,10 +80,7 @@ class MarkSweep : AllStatic { }; class AdjustPointerClosure: public OopsInGenClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) {} virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); }; @@ -146,7 +143,6 @@ class MarkSweep : AllStatic { static MarkAndPushClosure mark_and_push_closure; static FollowKlassClosure 
follow_klass_closure; static FollowStackClosure follow_stack_closure; - static AdjustPointerClosure adjust_root_pointer_closure; static AdjustPointerClosure adjust_pointer_closure; static AdjustKlassClosure adjust_klass_closure; @@ -179,12 +175,7 @@ class MarkSweep : AllStatic { static void adjust_marks(); // Adjust the pointers in the preserved marks table static void restore_marks(); // Restore the marks that we saved in preserve_mark - template static inline void adjust_pointer(T* p, bool isroot); - - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - static void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - + template static inline void adjust_pointer(T* p); }; class PreservedMark VALUE_OBJ_CLASS_SPEC { diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp index 9752291959a..8ffe0f78236 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp @@ -76,7 +76,7 @@ void MarkSweep::push_objarray(oop obj, size_t index) { _objarray_stack.push(task); } -template inline void MarkSweep::adjust_pointer(T* p, bool isroot) { +template inline void MarkSweep::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp index 756ed28f010..211a084ab38 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp @@ -225,7 +225,10 @@ void VM_CollectForMetadataAllocation::doit() { gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); } heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); - _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + // After a GC try to allocate without expanding. Could fail + // and expansion will be tried below. + _result = + _loader_data->metaspace_non_null()->allocate(_size, _mdtype); } if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) { // If still failing, allow the Metaspace to expand. diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp index a14f0ff5905..f5f9d39bcec 100644 --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -454,7 +454,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea continuation = Interpreter::remove_activation_entry(); #endif // Count this for compilation purposes - h_method->interpreter_throwout_increment(); + h_method->interpreter_throwout_increment(THREAD); } else { // handler in this method => change bci/bcp to handler bci/bcp and continue there handler_pc = h_method->code_base() + handler_bci; @@ -903,6 +903,15 @@ IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int r fr.interpreter_frame_set_mdp(new_mdp); IRT_END +IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m)) + MethodCounters* mcs = Method::build_method_counters(m, thread); + if (HAS_PENDING_EXCEPTION) { + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); + CLEAR_PENDING_EXCEPTION; + } + return mcs; +IRT_END + IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread)) // We used to need an explict preserve_arguments here for invoke bytecodes. However, @@ -1043,7 +1052,7 @@ void SignatureHandlerLibrary::initialize() { return; } if (set_handler_blob() == NULL) { - vm_exit_out_of_memory(blob_size, "native signature handlers"); + vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers"); } BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer", diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp index 3aa2c8348e2..d46c43e94e0 100644 --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -169,6 +169,7 @@ class InterpreterRuntime: AllStatic { #ifdef ASSERT static void verify_mdp(Method* method, address bcp, address mdp); #endif // ASSERT + static MethodCounters* build_method_counters(JavaThread* thread, Method* m); }; diff --git a/hotspot/src/share/vm/interpreter/invocationCounter.cpp b/hotspot/src/share/vm/interpreter/invocationCounter.cpp index 747516369ce..6a113d0ffc6 100644 --- a/hotspot/src/share/vm/interpreter/invocationCounter.cpp +++ b/hotspot/src/share/vm/interpreter/invocationCounter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -104,15 +104,19 @@ const char* InvocationCounter::state_as_short_string(State state) { static address do_nothing(methodHandle method, TRAPS) { // dummy action for inactive invocation counters - method->invocation_counter()->set_carry(); - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->set_carry(); + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); return NULL; } static address do_decay(methodHandle method, TRAPS) { // decay invocation counters so compilation gets delayed - method->invocation_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->decay(); return NULL; } diff --git a/hotspot/src/share/vm/interpreter/linkResolver.cpp b/hotspot/src/share/vm/interpreter/linkResolver.cpp index 9f0a1544812..b0bd678b597 100644 --- a/hotspot/src/share/vm/interpreter/linkResolver.cpp +++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp @@ -1014,13 +1014,28 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand resolved_method->name(), resolved_method->signature())); } - // check if public - if (!sel_method->is_public()) { - ResourceMark rm(THREAD); - THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), - Method::name_and_sig_as_C_string(recv_klass(), - sel_method->name(), - sel_method->signature())); + // check access + if (sel_method->method_holder()->is_interface()) { + // Method holder is an interface. Throw Illegal Access Error if sel_method + // is neither public nor private. + if (!(sel_method->is_public() || sel_method->is_private())) { + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), + Method::name_and_sig_as_C_string(recv_klass(), + sel_method->name(), + sel_method->signature())); + } + } + else { + // Method holder is a class. Throw Illegal Access Error if sel_method + // is not public. 
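
The new branch in linkResolver.cpp above distinguishes the holder kind: when the selected method's holder is an interface it may be public or private, while a class holder (for example a java.lang.Object method) must be public; anything else raises IllegalAccessError. A minimal sketch of that rule, using simplified flags rather than the real Method/Klass API:

#include <cassert>

struct ResolvedMethod {
    bool holder_is_interface;
    bool is_public;
    bool is_private;
};

// Returns true when an invokeinterface selection may proceed; a 'false'
// result corresponds to throwing IllegalAccessError.
static bool interface_selection_accessible(const ResolvedMethod& m) {
    if (m.holder_is_interface) {
        // Interface holder: public or private is acceptable.
        return m.is_public || m.is_private;
    }
    // Class holder: must be public.
    return m.is_public;
}

int main() {
    assert( interface_selection_accessible({true,  false, true }));  // private interface method
    assert(!interface_selection_accessible({true,  false, false})); // package-private: reject
    assert(!interface_selection_accessible({false, false, true })); // private class method: reject
    assert( interface_selection_accessible({false, true,  false})); // public class method
    return 0;
}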
+ if (!sel_method->is_public()) { + ResourceMark rm(THREAD); + THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), + Method::name_and_sig_as_C_string(recv_klass(), + sel_method->name(), + sel_method->signature())); + } } // check if abstract if (check_null_and_abstract && sel_method->is_abstract()) { diff --git a/hotspot/src/share/vm/memory/allocation.cpp b/hotspot/src/share/vm/memory/allocation.cpp index f83eada8192..1c1bf7ab468 100644 --- a/hotspot/src/share/vm/memory/allocation.cpp +++ b/hotspot/src/share/vm/memory/allocation.cpp @@ -259,7 +259,7 @@ class ChunkPool: public CHeapObj { } if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); if (p == NULL) - vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate"); return p; } @@ -371,7 +371,7 @@ void* Chunk::operator new(size_t requested_size, size_t length) { default: { void *p = os::malloc(bytes, mtChunk, CALLER_PC); if (p == NULL) - vm_exit_out_of_memory(bytes, "Chunk::new"); + vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new"); return p; } } @@ -531,7 +531,7 @@ size_t Arena::used() const { } void Arena::signal_out_of_memory(size_t sz, const char* whence) const { - vm_exit_out_of_memory(sz, whence); + vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence); } // Grow a new Chunk diff --git a/hotspot/src/share/vm/memory/allocation.hpp b/hotspot/src/share/vm/memory/allocation.hpp index bc01b0135b0..b65b2979c2f 100644 --- a/hotspot/src/share/vm/memory/allocation.hpp +++ b/hotspot/src/share/vm/memory/allocation.hpp @@ -178,7 +178,7 @@ const bool NMT_track_callsite = false; #endif // INCLUDE_NMT // debug build does not inline -#if defined(_DEBUG_) +#if defined(_NMT_NOINLINE_) #define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) #define CALLER_CALLER_PC (NMT_track_callsite ? 
os::get_caller_pc(3) : 0) @@ -539,6 +539,9 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC { #define NEW_RESOURCE_ARRAY(type, size)\ (type*) resource_allocate_bytes((size) * sizeof(type)) +#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\ + (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) diff --git a/hotspot/src/share/vm/memory/allocation.inline.hpp b/hotspot/src/share/vm/memory/allocation.inline.hpp index 79bd774e358..c77e578dca6 100644 --- a/hotspot/src/share/vm/memory/allocation.inline.hpp +++ b/hotspot/src/share/vm/memory/allocation.inline.hpp @@ -58,7 +58,9 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0, #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p); #endif - if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "AllocateHeap"); + if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap"); + } return p; } @@ -68,7 +70,9 @@ inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags, #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p); #endif - if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "ReallocateHeap"); + if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { + vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap"); + } return p; } @@ -130,12 +134,12 @@ E* ArrayAllocator ::allocate(size_t length) { _addr = os::reserve_memory(_size, NULL, alignment); if (_addr == NULL) { - vm_exit_out_of_memory(_size, "Allocator (reserve)"); + vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)"); } bool success = os::commit_memory(_addr, _size, false /* executable */); if (!success) { - vm_exit_out_of_memory(_size, "Allocator (commit)"); + vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)"); } return (E*)_addr; diff --git a/hotspot/src/share/vm/memory/blockOffsetTable.cpp b/hotspot/src/share/vm/memory/blockOffsetTable.cpp index c083ec5098e..841794a0164 100644 --- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp +++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp @@ -80,7 +80,7 @@ void BlockOffsetSharedArray::resize(size_t new_word_size) { assert(delta > 0, "just checking"); if (!_vs.expand_by(delta)) { // Do better than this for Merlin - vm_exit_out_of_memory(delta, "offset table expansion"); + vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); } assert(_vs.high() == high + delta, "invalid expansion"); } else { diff --git a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp index 3c65d29a5f2..cbe3654cc65 100644 --- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp +++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp @@ -116,7 +116,7 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, _guard_region = MemRegion((HeapWord*)guard_page, _page_size); if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { // Do better than this for Merlin - vm_exit_out_of_memory(_page_size, "card table last card"); + vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card"); } *guard_card = last_card; @@ -292,7 +292,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) { if (!os::commit_memory((char*)new_committed.start(), 
new_committed.byte_size(), _page_size)) { // Do better than this for Merlin - vm_exit_out_of_memory(new_committed.byte_size(), + vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, "card table expansion"); } // Use new_end_aligned (as opposed to new_end_for_commit) because diff --git a/hotspot/src/share/vm/memory/filemap.cpp b/hotspot/src/share/vm/memory/filemap.cpp index 133685932fd..dbc0c87edce 100644 --- a/hotspot/src/share/vm/memory/filemap.cpp +++ b/hotspot/src/share/vm/memory/filemap.cpp @@ -238,8 +238,8 @@ void FileMapInfo::write_header() { void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) { align_file_position(); - size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord; - size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord; + size_t used = space->used_bytes_slow(Metaspace::NonClassType); + size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType); struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; write_region(i, (char*)space->bottom(), used, capacity, read_only, false); } diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp index f39631ae00b..8c2eb34d262 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp @@ -377,7 +377,7 @@ void GenCollectedHeap::do_collection(bool full, ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); - const size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); print_heap_before_gc(); @@ -447,8 +447,7 @@ void GenCollectedHeap::do_collection(bool full, prepare_for_verify(); prepared_for_verification = true; } - gclog_or_tty->print(" VerifyBeforeGC:"); - Universe::verify(); + Universe::verify(" VerifyBeforeGC:"); } COMPILER2_PRESENT(DerivedPointerTable::clear()); @@ -519,8 +518,7 @@ void GenCollectedHeap::do_collection(bool full, if (VerifyAfterGC && i >= VerifyGCLevel && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification - gclog_or_tty->print(" VerifyAfterGC:"); - Universe::verify(); + Universe::verify(" VerifyAfterGC:"); } if (PrintGCDetails) { @@ -556,6 +554,7 @@ void GenCollectedHeap::do_collection(bool full, if (complete) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Resize the metaspace capacity after full collections MetaspaceGC::compute_new_size(); update_full_collections_completed(); @@ -633,9 +632,8 @@ gen_process_strong_roots(int level, } void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { - SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); + CodeBlobClosure* code_roots) { + SharedHeap::process_weak_roots(root_closure, code_roots); // "Local" "weak" refs for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp index 034511b9b55..783cd372d7c 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp @@ -432,8 +432,7 @@ public: // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. 
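
Several call sites in the allocation and card-table hunks above now pass an error classification (OOM_MALLOC_ERROR vs OOM_MMAP_ERROR) into the fatal out-of-memory exit, so the crash report can say whether a C-heap allocation or a reserve/commit operation failed. A self-contained sketch of that shape; the enum and function here are simplified stand-ins, not the HotSpot error-reporting machinery.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum OomErrorType { OOM_MALLOC_ERROR, OOM_MMAP_ERROR };

// Simplified stand-in for a fatal out-of-memory exit: the classification
// lets the report distinguish malloc failures from mapping/commit failures.
static void exit_out_of_memory(size_t size, OomErrorType type, const char* whence) {
    std::fprintf(stderr, "Out of memory (%s): failed to %s %zu bytes for %s\n",
                 type == OOM_MALLOC_ERROR ? "malloc" : "mmap",
                 type == OOM_MALLOC_ERROR ? "allocate" : "map",
                 size, whence);
    std::exit(1);
}

int main() {
    void* p = std::malloc(64);
    if (p == nullptr) {
        exit_out_of_memory(64, OOM_MALLOC_ERROR, "example buffer");
    }
    std::free(p);
    return 0;
}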
void gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // Set the saved marks of generations, if that makes sense. // In particular, if any generation might iterate over the oops diff --git a/hotspot/src/share/vm/memory/genMarkSweep.cpp b/hotspot/src/share/vm/memory/genMarkSweep.cpp index 2180f63886f..6d0fd9b49d6 100644 --- a/hotspot/src/share/vm/memory/genMarkSweep.cpp +++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp @@ -223,23 +223,23 @@ void GenMarkSweep::mark_sweep_phase1(int level, &is_alive, &keep_alive, &follow_stack_closure, NULL); } - // Follow system dictionary roots and unload classes + // This is the point where the entire marking should have completed. + assert(_marking_stack.is_empty(), "Marking should have completed"); + + // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&is_alive); - // Follow code cache roots + // Unload nmethods. CodeCache::do_unloading(&is_alive, purged_class); - follow_stack(); // Flush marking stack - // Update subklass/sibling/implementor links of live klasses + // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&is_alive); - assert(_marking_stack.is_empty(), "just drained"); - // Visit interned string tables and delete unmarked oops + // Delete entries for dead interned strings. StringTable::unlink(&is_alive); + // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); - - assert(_marking_stack.is_empty(), "stack should be empty by now"); } @@ -282,11 +282,10 @@ void GenMarkSweep::mark_sweep_phase3(int level) { // Need new claim bits for the pointer adjustment tracing. ClassLoaderDataGraph::clear_claimed_marks(); - // Because the two closures below are created statically, cannot + // Because the closure below is created statically, we cannot // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. - adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level)); adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); gch->gen_process_strong_roots(level, @@ -294,18 +293,17 @@ void GenMarkSweep::mark_sweep_phase3(int level) { true, // activate StrongRootsScope false, // not scavenging SharedHeap::SO_AllClasses, - &adjust_root_pointer_closure, + &adjust_pointer_closure, false, // do not walk code - &adjust_root_pointer_closure, + &adjust_pointer_closure, &adjust_klass_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, /*do_marking=*/ false); - gch->gen_process_weak_roots(&adjust_root_pointer_closure, - &adjust_code_pointer_closure, - &adjust_pointer_closure); + gch->gen_process_weak_roots(&adjust_pointer_closure, + &adjust_code_pointer_closure); adjust_marks(); GenAdjustPointersClosure blk; diff --git a/hotspot/src/share/vm/memory/heap.cpp b/hotspot/src/share/vm/memory/heap.cpp index bf41864c6f3..727690b5c7f 100644 --- a/hotspot/src/share/vm/memory/heap.cpp +++ b/hotspot/src/share/vm/memory/heap.cpp @@ -42,7 +42,7 @@ CodeHeap::CodeHeap() { _log2_segment_size = 0; _next_segment = 0; _freelist = NULL; - _free_segments = 0; + _freelist_segments = 0; } @@ -115,8 +115,8 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size, } on_code_mapping(_memory.low(), _memory.committed_size()); - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - _number_of_reserved_segments = number_of_segments(_memory.reserved_size()); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + _number_of_reserved_segments = size_to_segments(_memory.reserved_size()); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // reserve space for _segmap @@ -149,8 +149,8 @@ bool CodeHeap::expand_by(size_t size) { if (!_memory.expand_by(dm)) return false; on_code_mapping(base, dm); size_t i = _number_of_committed_segments; - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change"); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change"); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // expand _segmap space size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size(); @@ -176,33 +176,44 @@ void CodeHeap::clear() { } -void* CodeHeap::allocate(size_t size) { - size_t length = number_of_segments(size + sizeof(HeapBlock)); - assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList"); +void* CodeHeap::allocate(size_t instance_size, bool is_critical) { + size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); + assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); // First check if we can satify request from freelist debug_only(verify()); - HeapBlock* block = search_freelist(length); + HeapBlock* block = search_freelist(number_of_segments, is_critical); debug_only(if (VerifyCodeCacheOften) verify()); if (block != NULL) { - assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check"); + assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check"); assert(!block->free(), "must be marked free"); #ifdef ASSERT - memset((void *)block->allocated_space(), badCodeHeapNewVal, size); + memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); #endif return block->allocated_space(); } - if (length < CodeCacheMinBlockLength) { - length = CodeCacheMinBlockLength; + // Ensure minimum size for allocation to the heap. 
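
The CodeHeap renames above (number_of_segments/size becoming size_to_segments/segments_to_size) make the direction of each conversion explicit. A small sketch of the underlying power-of-two segment arithmetic, assuming an 8-byte segment purely for illustration:

#include <cassert>
#include <cstddef>

// Code-heap space is managed in fixed power-of-two segments.
static const size_t log2_segment_size = 3;                  // 8-byte segments, for illustration
static const size_t segment_size      = size_t(1) << log2_segment_size;

// Bytes -> segments, rounding up so the request always fits.
static size_t size_to_segments(size_t size) {
    return (size + segment_size - 1) >> log2_segment_size;
}

// Segments -> bytes.
static size_t segments_to_size(size_t segments) {
    return segments << log2_segment_size;
}

int main() {
    assert(size_to_segments(1) == 1);
    assert(size_to_segments(8) == 1);
    assert(size_to_segments(9) == 2);
    assert(segments_to_size(size_to_segments(100)) >= 100);  // round-trip never shrinks
    return 0;
}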
+ if (number_of_segments < CodeCacheMinBlockLength) { + number_of_segments = CodeCacheMinBlockLength; } - if (_next_segment + length <= _number_of_committed_segments) { - mark_segmap_as_used(_next_segment, _next_segment + length); + + if (!is_critical) { + // Make sure the allocation fits in the unallocated heap without using + // the CodeCacheMimimumFreeSpace that is reserved for critical allocations. + if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) { + // Fail allocation + return NULL; + } + } + + if (_next_segment + number_of_segments <= _number_of_committed_segments) { + mark_segmap_as_used(_next_segment, _next_segment + number_of_segments); HeapBlock* b = block_at(_next_segment); - b->initialize(length); - _next_segment += length; + b->initialize(number_of_segments); + _next_segment += number_of_segments; #ifdef ASSERT - memset((void *)b->allocated_space(), badCodeHeapNewVal, size); + memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); #endif return b->allocated_space(); } else { @@ -219,7 +230,7 @@ void CodeHeap::deallocate(void* p) { #ifdef ASSERT memset((void *)b->allocated_space(), badCodeHeapFreeVal, - size(b->length()) - sizeof(HeapBlock)); + segments_to_size(b->length()) - sizeof(HeapBlock)); #endif add_to_freelist(b); @@ -299,32 +310,14 @@ size_t CodeHeap::max_capacity() const { } size_t CodeHeap::allocated_capacity() const { - // Start with the committed size in _memory; - size_t l = _memory.committed_size(); - - // Subtract the committed, but unused, segments - l -= size(_number_of_committed_segments - _next_segment); - - // Subtract the size of the freelist - l -= size(_free_segments); - - return l; + // size of used heap - size on freelist + return segments_to_size(_next_segment - _freelist_segments); } -size_t CodeHeap::largest_free_block() const { - // First check unused space excluding free blocks. - size_t free_sz = size(_free_segments); - size_t unused = max_capacity() - allocated_capacity() - free_sz; - if (unused >= free_sz) - return unused; - - // Now check largest free block. - size_t len = 0; - for (FreeBlock* b = _freelist; b != NULL; b = b->link()) { - if (b->length() > len) - len = b->length(); - } - return MAX2(unused, size(len)); +// Returns size of the unallocated heap block +size_t CodeHeap::heap_unallocated_capacity() const { + // Total number of segments - number currently used + return segments_to_size(_number_of_reserved_segments - _next_segment); } // Free list management @@ -365,7 +358,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) { assert(b != _freelist, "cannot be removed twice"); // Mark as free and update free space count - _free_segments += b->length(); + _freelist_segments += b->length(); b->set_free(); // First element in list? @@ -400,7 +393,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) { // Search freelist for an entry on the list with the best fit // Return NULL if no one was found -FreeBlock* CodeHeap::search_freelist(size_t length) { +FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) { FreeBlock *best_block = NULL; FreeBlock *best_prev = NULL; size_t best_length = 0; @@ -411,6 +404,16 @@ FreeBlock* CodeHeap::search_freelist(size_t length) { while(cur != NULL) { size_t l = cur->length(); if (l >= length && (best_block == NULL || best_length > l)) { + + // Non critical allocations are not allowed to use the last part of the code heap. 
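
The is_critical parameter above keeps the last CodeCacheMinimumFreeSpace bytes of the code heap available for critical allocations; ordinary requests fail early instead of consuming that tail. A hedged sketch of the check, with simplified bookkeeping (TinyCodeHeap is an illustrative stand-in, not the real CodeHeap):

#include <cassert>
#include <cstddef>

struct TinyCodeHeap {
    size_t capacity;        // total reserved bytes
    size_t used;            // bytes handed out so far
    size_t reserved_tail;   // kept free for critical allocations only

    // Returns false when the request must fail (no expansion attempted here).
    bool allocate(size_t size, bool is_critical) {
        size_t unallocated = capacity - used;
        if (!is_critical) {
            // Non-critical requests may not dip into the reserved tail.
            if (unallocated < reserved_tail || size > unallocated - reserved_tail) {
                return false;
            }
        }
        if (size > unallocated) return false;
        used += size;
        return true;
    }
};

int main() {
    TinyCodeHeap heap{1024, 0, 128};
    assert( heap.allocate(800, false));  // fits without touching the tail
    assert(!heap.allocate(100, false));  // would eat into the reserved tail
    assert( heap.allocate(100, true));   // a critical request may use it
    return 0;
}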
+ if (!is_critical) { + // Make sure the end of the allocation doesn't cross into the last part of the code heap + if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) { + // the freelist is sorted by address - if one fails, all consecutive will also fail. + break; + } + } + // Remember best block, its previous element, and its length best_block = cur; best_prev = prev; @@ -452,7 +455,7 @@ FreeBlock* CodeHeap::search_freelist(size_t length) { } best_block->set_used(); - _free_segments -= length; + _freelist_segments -= length; return best_block; } @@ -478,7 +481,7 @@ void CodeHeap::verify() { } // Verify that freelist contains the right amount of free space - // guarantee(len == _free_segments, "wrong freelist"); + // guarantee(len == _freelist_segments, "wrong freelist"); // Verify that the number of free blocks is not out of hand. static int free_block_threshold = 10000; diff --git a/hotspot/src/share/vm/memory/heap.hpp b/hotspot/src/share/vm/memory/heap.hpp index f1aa3ffdc33..725592e67be 100644 --- a/hotspot/src/share/vm/memory/heap.hpp +++ b/hotspot/src/share/vm/memory/heap.hpp @@ -91,11 +91,11 @@ class CodeHeap : public CHeapObj { size_t _next_segment; FreeBlock* _freelist; - size_t _free_segments; // No. of segments in freelist + size_t _freelist_segments; // No. of segments in freelist // Helper functions - size_t number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } - size_t size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } + size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } + size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; } HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); } @@ -110,7 +110,7 @@ class CodeHeap : public CHeapObj { // Toplevel freelist management void add_to_freelist(HeapBlock *b); - FreeBlock* search_freelist(size_t length); + FreeBlock* search_freelist(size_t length, bool is_critical); // Iteration helpers void* next_free(HeapBlock* b) const; @@ -132,22 +132,19 @@ class CodeHeap : public CHeapObj { void clear(); // clears all heap contents // Memory allocation - void* allocate (size_t size); // allocates a block of size or returns NULL + void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL void deallocate(void* p); // deallocates a block // Attributes - void* begin() const { return _memory.low (); } - void* end() const { return _memory.high(); } - bool contains(void* p) const { return begin() <= p && p < end(); } - void* find_start(void* p) const; // returns the block containing p or NULL - size_t alignment_unit() const; // alignment of any block - size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit - static size_t header_size(); // returns the header size for each heap block + char* low_boundary() const { return _memory.low_boundary (); } + char* high() const { return _memory.high(); } + char* high_boundary() const { return _memory.high_boundary(); } - // Returns reserved area high and low addresses - char *low_boundary() const { return _memory.low_boundary (); } - char *high() const { return _memory.high(); } - char *high_boundary() const { return _memory.high_boundary(); } + bool contains(const 
void* p) const { return low_boundary() <= p && p < high(); } + void* find_start(void* p) const; // returns the block containing p or NULL + size_t alignment_unit() const; // alignment of any block + size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit + static size_t header_size(); // returns the header size for each heap block // Iteration @@ -161,8 +158,11 @@ class CodeHeap : public CHeapObj { size_t max_capacity() const; size_t allocated_capacity() const; size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); } - size_t largest_free_block() const; +private: + size_t heap_unallocated_capacity() const; + +public: // Debugging void verify(); void print() PRODUCT_RETURN; diff --git a/hotspot/src/share/vm/memory/metachunk.cpp b/hotspot/src/share/vm/memory/metachunk.cpp index 4cb955862a8..0ac4ced70f4 100644 --- a/hotspot/src/share/vm/memory/metachunk.cpp +++ b/hotspot/src/share/vm/memory/metachunk.cpp @@ -28,6 +28,7 @@ #include "utilities/copy.hpp" #include "utilities/debug.hpp" +class VirtualSpaceNode; // // Future modification // @@ -45,27 +46,30 @@ size_t Metachunk::_overhead = // Metachunk methods -Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { - // Set bottom, top, and end. Allow space for the Metachunk itself - Metachunk* chunk = (Metachunk*) ptr; - - MetaWord* chunk_bottom = ptr + _overhead; - chunk->set_bottom(ptr); - chunk->set_top(chunk_bottom); - MetaWord* chunk_end = ptr + word_size; - assert(chunk_end > chunk_bottom, "Chunk must be too small"); - chunk->set_end(chunk_end); - chunk->set_next(NULL); - chunk->set_prev(NULL); - chunk->set_word_size(word_size); +Metachunk::Metachunk(size_t word_size, + VirtualSpaceNode* container) : + _word_size(word_size), + _bottom(NULL), + _end(NULL), + _top(NULL), + _next(NULL), + _prev(NULL), + _container(container) +{ + _bottom = (MetaWord*)this; + _top = (MetaWord*)this + _overhead; + _end = (MetaWord*)this + word_size; #ifdef ASSERT - size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord)); - Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize); + set_is_free(false); + size_t data_word_size = pointer_delta(end(), + top(), + sizeof(MetaWord)); + Copy::fill_to_words((HeapWord*) top(), + data_word_size, + metadata_chunk_initialize); #endif - return chunk; } - MetaWord* Metachunk::allocate(size_t word_size) { MetaWord* result = NULL; // If available, bump the pointer to allocate. diff --git a/hotspot/src/share/vm/memory/metachunk.hpp b/hotspot/src/share/vm/memory/metachunk.hpp index a10cba8dbbe..ff237ab5d3f 100644 --- a/hotspot/src/share/vm/memory/metachunk.hpp +++ b/hotspot/src/share/vm/memory/metachunk.hpp @@ -41,10 +41,13 @@ // | | | | // +--------------+ <- bottom ---+ ---+ +class VirtualSpaceNode; + class Metachunk VALUE_OBJ_CLASS_SPEC { // link to support lists of chunks Metachunk* _next; Metachunk* _prev; + VirtualSpaceNode* _container; MetaWord* _bottom; MetaWord* _end; @@ -61,29 +64,20 @@ class Metachunk VALUE_OBJ_CLASS_SPEC { // the space. 
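
The Metachunk constructor above replaces the old static initialize(): the chunk header is now constructed in place at the bottom of its own memory, and it remembers the VirtualSpaceNode it was carved from. A self-contained sketch of that layout trick with simplified types (ChunkHeader and Arena are illustrative names, not the HotSpot classes):

#include <cassert>
#include <cstddef>
#include <new>      // placement new

struct Arena;  // stand-in for the owning virtual-space node

// Header constructed in place at the start of the chunk's own memory; the
// payload follows the header, and 'container' points back at the owner so
// per-node chunk accounting is possible.
struct ChunkHeader {
    size_t size_bytes;    // total chunk size, header included
    Arena* container;     // back-pointer to the owning node

    ChunkHeader(size_t sz, Arena* c) : size_bytes(sz), container(c) {}

    char* payload() { return reinterpret_cast<char*>(this) + sizeof(ChunkHeader); }
};

int main() {
    alignas(ChunkHeader) char backing[256];   // pretend this was carved from a node
    Arena* owner = nullptr;                   // no real node needed for the sketch

    ChunkHeader* chunk = ::new (backing) ChunkHeader(sizeof(backing), owner);
    assert(reinterpret_cast<char*>(chunk) == backing);          // header sits at the bottom
    assert(chunk->payload() == backing + sizeof(ChunkHeader));  // payload starts right after it
    chunk->~ChunkHeader();
    return 0;
}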
static size_t _overhead; - void set_bottom(MetaWord* v) { _bottom = v; } - void set_end(MetaWord* v) { _end = v; } - void set_top(MetaWord* v) { _top = v; } - void set_word_size(size_t v) { _word_size = v; } public: -#ifdef ASSERT - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false), - _next(NULL), _prev(NULL) {} -#else - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), - _next(NULL), _prev(NULL) {} -#endif + Metachunk(size_t word_size , VirtualSpaceNode* container); // Used to add a Metachunk to a list of Metachunks void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");} void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");} + void set_container(VirtualSpaceNode* v) { _container = v; } MetaWord* allocate(size_t word_size); - static Metachunk* initialize(MetaWord* ptr, size_t word_size); // Accessors Metachunk* next() const { return _next; } Metachunk* prev() const { return _prev; } + VirtualSpaceNode* container() const { return _container; } MetaWord* bottom() const { return _bottom; } MetaWord* end() const { return _end; } MetaWord* top() const { return _top; } diff --git a/hotspot/src/share/vm/memory/metaspace.cpp b/hotspot/src/share/vm/memory/metaspace.cpp index 533d982a9d7..68189bf3ea1 100644 --- a/hotspot/src/share/vm/memory/metaspace.cpp +++ b/hotspot/src/share/vm/memory/metaspace.cpp @@ -47,7 +47,6 @@ typedef BinaryTreeDictionary ChunkTreeDictionary; // the free chunk lists const bool metaspace_slow_verify = false; - // Parameters for stress mode testing const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lock_chunk = 3; @@ -103,27 +102,7 @@ bool MetaspaceGC::_should_concurrent_collect = false; // a chunk is placed on the free list of blocks (BlockFreelist) and // reused from there. -// Pointer to list of Metachunks. -class ChunkList VALUE_OBJ_CLASS_SPEC { - // List of free chunks - Metachunk* _head; - - public: - // Constructor - ChunkList() : _head(NULL) {} - - // Accessors - Metachunk* head() { return _head; } - void set_head(Metachunk* v) { _head = v; } - - // Link at head of the list - void add_at_head(Metachunk* head, Metachunk* tail); - void add_at_head(Metachunk* head); - - size_t sum_list_size(); - size_t sum_list_count(); - size_t sum_list_capacity(); -}; +typedef class FreeList ChunkList; // Manages the global free lists of chunks. // Has three lists of free chunks, and a total size and @@ -132,6 +111,7 @@ class ChunkList VALUE_OBJ_CLASS_SPEC { class ChunkManager VALUE_OBJ_CLASS_SPEC { // Free list of chunks of different sizes. + // SpecializedChunk // SmallChunk // MediumChunk // HumongousChunk @@ -185,6 +165,14 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { // for special, small, medium, and humongous chunks. static ChunkIndex list_index(size_t size); + // Remove the chunk from its freelist. It is + // expected to be on one of the _free_chunks[] lists. + void remove_chunk(Metachunk* chunk); + + // Add the simple linked list of chunks to the freelist of chunks + // of type index. + void return_chunks(ChunkIndex index, Metachunk* chunks); + // Total of the space in the free chunks list size_t free_chunks_total(); size_t free_chunks_total_in_bytes(); @@ -231,7 +219,6 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { void print_on(outputStream* st); }; - // Used to manage the free list of Metablocks (a block corresponds // to the allocation of a quantum of metadata). 
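
With the ChunkManager changes above, ChunkList becomes a FreeList of Metachunks and free chunks are binned by size class (specialized, small, medium), with odd-sized humongous chunks kept in a searchable dictionary. A minimal sketch of that binning; the sizes and container types here (std::vector stacks, std::multimap) are illustrative stand-ins for the real free lists.

#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

enum ChunkIndex { SpecializedIndex, SmallIndex, MediumIndex, HumongousIndex };

struct Chunk { size_t word_size; };

// Illustrative sizes; the real values differ between class and non-class metaspace.
static const size_t SpecializedChunkWords = 128;
static const size_t SmallChunkWords       = 512;
static const size_t MediumChunkWords      = 8 * 1024;

static ChunkIndex list_index(size_t word_size) {
    if (word_size == SpecializedChunkWords) return SpecializedIndex;
    if (word_size == SmallChunkWords)       return SmallIndex;
    if (word_size == MediumChunkWords)      return MediumIndex;
    return HumongousIndex;                  // anything else goes to the dictionary
}

struct ChunkManager {
    std::vector<Chunk*> free_lists[3];          // one exact-size list per small class
    std::multimap<size_t, Chunk*> humongous;    // odd sizes, searchable by size

    void return_chunk(Chunk* c) {
        ChunkIndex idx = list_index(c->word_size);
        if (idx == HumongousIndex) humongous.insert({c->word_size, c});
        else                       free_lists[idx].push_back(c);
    }
};

int main() {
    ChunkManager cm;
    Chunk small{SmallChunkWords}, odd{3000};
    cm.return_chunk(&small);
    cm.return_chunk(&odd);
    assert(cm.free_lists[SmallIndex].size() == 1);
    assert(cm.humongous.count(3000) == 1);
    return 0;
}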
class BlockFreelist VALUE_OBJ_CLASS_SPEC { @@ -271,6 +258,8 @@ class VirtualSpaceNode : public CHeapObj { ReservedSpace _rs; VirtualSpace _virtual_space; MetaWord* _top; + // count of chunks contained in this VirtualSpace + uintx _container_count; // Convenience functions for logical bottom and end MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } @@ -280,10 +269,19 @@ class VirtualSpaceNode : public CHeapObj { char* low() const { return virtual_space()->low(); } char* high() const { return virtual_space()->high(); } + // The first Metachunk will be allocated at the bottom of the + // VirtualSpace + Metachunk* first_chunk() { return (Metachunk*) bottom(); } + + void inc_container_count(); +#ifdef ASSERT + uint container_count_slow(); +#endif + public: VirtualSpaceNode(size_t byte_size); - VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {} + VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} ~VirtualSpaceNode(); // address of next available space in _virtual_space; @@ -298,15 +296,22 @@ class VirtualSpaceNode : public CHeapObj { MemRegion* reserved() { return &_reserved; } VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; } - // Returns true if "word_size" is available in the virtual space + // Returns true if "word_size" is available in the VirtualSpace bool is_available(size_t word_size) { return _top + word_size <= end(); } MetaWord* top() const { return _top; } void inc_top(size_t word_size) { _top += word_size; } + uintx container_count() { return _container_count; } + void dec_container_count(); +#ifdef ASSERT + void verify_container_count(); +#endif + // used and capacity in this single entry in the list size_t used_words_in_vs() const; size_t capacity_words_in_vs() const; + size_t free_words_in_vs() const; bool initialize(); @@ -322,6 +327,10 @@ class VirtualSpaceNode : public CHeapObj { bool expand_by(size_t words, bool pre_touch = false); bool shrink_by(size_t words); + // In preparation for deleting this node, remove all the chunks + // in the node from any freelist. + void purge(ChunkManager* chunk_manager); + #ifdef ASSERT // Debug support static void verify_virtual_space_total(); @@ -333,7 +342,7 @@ class VirtualSpaceNode : public CHeapObj { }; // byte_size is the size of the associated virtualspace. 
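
Each VirtualSpaceNode above now carries a _container_count, the number of chunks carved from it that are currently in use, and debug builds cross-check it against a slow walk (container_count_slow). A hedged sketch of that fast-counter-plus-slow-recount pattern with simplified types:

#include <cassert>
#include <cstddef>
#include <vector>

// Simplified node: records which of its chunks are in use and keeps a
// running count so the common-case query is O(1).
struct Node {
    std::vector<bool> chunk_in_use;   // stand-in for walking Metachunks by address
    size_t container_count = 0;       // fast running count of in-use chunks

    void add_chunk()          { chunk_in_use.push_back(true); container_count++; verify(); }
    void free_chunk(size_t i) { chunk_in_use[i] = false;      container_count--; verify(); }

    // Slow recount, used only to validate the running counter (cf. ASSERT builds).
    size_t container_count_slow() const {
        size_t n = 0;
        for (bool used : chunk_in_use) if (used) n++;
        return n;
    }
    void verify() const { assert(container_count == container_count_slow()); }
};

int main() {
    Node node;
    node.add_chunk();
    node.add_chunk();
    node.free_chunk(0);
    assert(node.container_count == 1);
    return 0;
}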
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) { // align up to vm allocation granularity byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); @@ -357,6 +366,39 @@ VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); } +void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + assert(chunk->is_free(), "Should be marked free"); + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + chunk_manager->remove_chunk(chunk); + assert(chunk->next() == NULL && + chunk->prev() == NULL, + "Was not removed from its list"); + chunk = (Metachunk*) next; + } +} + +#ifdef ASSERT +uint VirtualSpaceNode::container_count_slow() { + uint count = 0; + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + // Don't count the chunks on the free lists. Those are + // still part of the VirtualSpaceNode but not currently + // counted. + if (!chunk->is_free()) { + count++; + } + chunk = (Metachunk*) next; + } + return count; +} +#endif + // List of VirtualSpaces for metadata allocation. // It has a _next link for singly linked list and a MemRegion // for total space in the VirtualSpace. @@ -406,6 +448,8 @@ class VirtualSpaceList : public CHeapObj { VirtualSpaceList(size_t word_size); VirtualSpaceList(ReservedSpace rs); + size_t free_bytes(); + Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words, size_t medium_chunk_bunch); @@ -426,14 +470,14 @@ class VirtualSpaceList : public CHeapObj { void initialize(size_t word_size); size_t virtual_space_total() { return _virtual_space_total; } - void inc_virtual_space_total(size_t v) { - Atomic::add_ptr(v, &_virtual_space_total); - } - size_t virtual_space_count() { return _virtual_space_count; } - void inc_virtual_space_count() { - Atomic::inc_ptr(&_virtual_space_count); - } + void inc_virtual_space_total(size_t v); + void dec_virtual_space_total(size_t v); + void inc_virtual_space_count(); + void dec_virtual_space_count(); + + // Unlink empty VirtualSpaceNodes and free it. + void purge(); // Used and capacity in the entire list of virtual spaces. // These are global values shared by all Metaspaces @@ -536,7 +580,11 @@ class SpaceManager : public CHeapObj { bool has_small_chunk_limit() { return !vs_list()->is_class(); } // Sum of all space in allocated chunks - size_t _allocation_total; + size_t _allocated_blocks_words; + + // Sum of all allocated chunks + size_t _allocated_chunks_words; + size_t _allocated_chunks_count; // Free lists of blocks are per SpaceManager since they // are assumed to be in chunks in use by the SpaceManager @@ -592,12 +640,27 @@ class SpaceManager : public CHeapObj { size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? 
ClassMediumChunk : MediumChunk; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } - size_t allocation_total() const { return _allocation_total; } - void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); } + size_t allocated_blocks_words() const { return _allocated_blocks_words; } + size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } + size_t allocated_chunks_words() const { return _allocated_chunks_words; } + size_t allocated_chunks_count() const { return _allocated_chunks_count; } + bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } static Mutex* expand_lock() { return _expand_lock; } + // Increment the per Metaspace and global running sums for Metachunks + // by the given size. This is used when a Metachunk to added to + // the in-use list. + void inc_size_metrics(size_t words); + // Increment the per Metaspace and global running sums Metablocks by the given + // size. This is used when a Metablock is allocated. + void inc_used_metrics(size_t words); + // Delete the portion of the running sums for this SpaceManager. That is, + // the globals running sums for the Metachunks and Metablocks are + // decremented for all the Metachunks in-use by this SpaceManager. + void dec_total_from_size_metrics(); + // Set the sizes for the initial chunks. void get_initial_chunk_sizes(Metaspace::MetaspaceType type, size_t* chunk_word_size, @@ -643,7 +706,7 @@ class SpaceManager : public CHeapObj { void verify_chunk_size(Metachunk* chunk); NOT_PRODUCT(void mangle_freed_chunks();) #ifdef ASSERT - void verify_allocation_total(); + void verify_allocated_blocks_words(); #endif }; @@ -657,6 +720,28 @@ Mutex* const SpaceManager::_expand_lock = SpaceManager::_expand_lock_name, Mutex::_allow_vm_block_flag); +void VirtualSpaceNode::inc_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count++; + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, + _container_count, container_count_slow())); +} + +void VirtualSpaceNode::dec_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count--; +} + +#ifdef ASSERT +void VirtualSpaceNode::verify_container_count() { + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); +} +#endif + // BlockFreelist methods BlockFreelist::BlockFreelist() : _dictionary(NULL) {} @@ -717,6 +802,10 @@ void BlockFreelist::print_on(outputStream* st) const { VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); +#ifdef ASSERT + size_t word_size = sizeof(*this) / BytesPerWord; + Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); +#endif } size_t VirtualSpaceNode::used_words_in_vs() const { @@ -728,6 +817,9 @@ size_t VirtualSpaceNode::capacity_words_in_vs() const { return pointer_delta(end(), bottom(), sizeof(MetaWord)); } +size_t VirtualSpaceNode::free_words_in_vs() const { + return pointer_delta(end(), top(), sizeof(MetaWord)); +} // Allocates the chunk from the virtual space only. // This interface is also used internally for debugging. Not all @@ -749,8 +841,8 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { // Take the space (bump top on the current virtual space). 
inc_top(chunk_word_size); - // Point the chunk at the space - Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size); + // Initialize the chunk + Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this); return result; } @@ -778,9 +870,11 @@ bool VirtualSpaceNode::shrink_by(size_t words) { Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* result = NULL; - - return take_from_committed(chunk_word_size); + Metachunk* result = take_from_committed(chunk_word_size); + if (result != NULL) { + inc_container_count(); + } + return result; } Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) { @@ -859,6 +953,83 @@ VirtualSpaceList::~VirtualSpaceList() { } } +void VirtualSpaceList::inc_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total + v; +} +void VirtualSpaceList::dec_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total - v; +} + +void VirtualSpaceList::inc_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count++; +} +void VirtualSpaceList::dec_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count--; +} + +void ChunkManager::remove_chunk(Metachunk* chunk) { + size_t word_size = chunk->word_size(); + ChunkIndex index = list_index(word_size); + if (index != HumongousIndex) { + free_chunks(index)->remove_chunk(chunk); + } else { + humongous_dictionary()->remove_chunk(chunk); + } + + // Chunk is being removed from the chunks free list. + dec_free_chunks_total(chunk->capacity_word_size()); +} + +// Walk the list of VirtualSpaceNodes and delete +// nodes with a 0 container_count. Remove Metachunks in +// the node from their respective freelists. +void VirtualSpaceList::purge() { + assert_lock_strong(SpaceManager::expand_lock()); + // Don't use a VirtualSpaceListIterator because this + // list is being changed and a straightforward use of an iterator is not safe. + VirtualSpaceNode* purged_vsl = NULL; + VirtualSpaceNode* prev_vsl = virtual_space_list(); + VirtualSpaceNode* next_vsl = prev_vsl; + while (next_vsl != NULL) { + VirtualSpaceNode* vsl = next_vsl; + next_vsl = vsl->next(); + // Don't free the current virtual space since it will likely + // be needed soon. + if (vsl->container_count() == 0 && vsl != current_virtual_space()) { + // Unlink it from the list + if (prev_vsl == vsl) { + // This is the case of the current note being the first note. + assert(vsl == virtual_space_list(), "Expected to be the first note"); + set_virtual_space_list(vsl->next()); + } else { + prev_vsl->set_next(vsl->next()); + } + + vsl->purge(chunk_manager()); + dec_virtual_space_total(vsl->reserved()->word_size()); + dec_virtual_space_count(); + purged_vsl = vsl; + delete vsl; + } else { + prev_vsl = vsl; + } + } +#ifdef ASSERT + if (purged_vsl != NULL) { + // List should be stable enough to use an iterator here. 
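
The VirtualSpaceList::purge() hunk above removes nodes from a singly linked list while walking it, which is why it tracks prev/next pointers by hand instead of using VirtualSpaceListIterator, and why it keeps the current virtual space alive. A condensed sketch of the same unlink-while-walking idea over a hypothetical Node type (not the real VirtualSpaceNode), written with a link pointer so the head case needs no special branch:

    #include <cstddef>

    struct Node {
      Node*  next;
      size_t container_count;   // number of in-use chunks carved out of this node
    };

    // Unlink and delete every node that no longer backs any chunk, except 'keep'
    // (the current virtual space, which will likely be needed again soon).
    // Returns the possibly updated list head.
    Node* purge_empty_nodes(Node* head, Node* keep) {
      Node** link = &head;                 // the pointer that currently points at *link
      while (*link != NULL) {
        Node* cur = *link;
        if (cur->container_count == 0 && cur != keep) {
          *link = cur->next;               // splice cur out; works for the head too
          delete cur;
        } else {
          link = &cur->next;               // advance to the next link in the chain
        }
      }
      return head;
    }
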
+ VirtualSpaceListIterator iter(virtual_space_list()); + while (iter.repeat()) { + VirtualSpaceNode* vsl = iter.get_next(); + assert(vsl != purged_vsl, "Purge of vsl failed"); + } + } +#endif +} + size_t VirtualSpaceList::used_words_sum() { size_t allocated_by_vs = 0; VirtualSpaceListIterator iter(virtual_space_list()); @@ -899,6 +1070,9 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) : Mutex::_no_safepoint_check_flag); bool initialization_succeeded = grow_vs(word_size); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk); assert(initialization_succeeded, " VirtualSpaceList initialization should not fail"); } @@ -913,10 +1087,17 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : Mutex::_no_safepoint_check_flag); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); bool succeeded = class_entry->initialize(); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk); assert(succeeded, " VirtualSpaceList initialization should not fail"); link_vs(class_entry, rs.size()/BytesPerWord); } +size_t VirtualSpaceList::free_bytes() { + return virtual_space_list()->free_words_in_vs() * BytesPerWord; +} + // Allocate another meta virtual space and add it to the list. bool VirtualSpaceList::grow_vs(size_t vs_word_size) { assert_lock_strong(SpaceManager::expand_lock()); @@ -965,8 +1146,10 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, // Get a chunk from the chunk freelist Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); - // Allocate a chunk out of the current virtual space. - if (next == NULL) { + if (next != NULL) { + next->container()->inc_container_count(); + } else { + // Allocate a chunk out of the current virtual space. next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); } @@ -1055,9 +1238,9 @@ bool VirtualSpaceList::contains(const void *ptr) { // // After the GC the compute_new_size() for MetaspaceGC is called to // resize the capacity of the metaspaces. The current implementation -// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used // to resize the Java heap by some GC's. New flags can be implemented -// if really needed. MinHeapFreeRatio is used to calculate how much +// if really needed. MinMetaspaceFreeRatio is used to calculate how much // free space is desirable in the metaspace capacity to decide how much // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much // free space is desirable in the metaspace capacity before decreasing @@ -1092,7 +1275,11 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { } bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { + + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); // If the user wants a limit, impose one. 
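
The get_new_chunk() change above makes the chunk free list the first stop: only when no recycled chunk of the right size is available does the allocator carve a fresh chunk out of the current virtual space, and whichever path succeeds ends up incrementing the owning node's container count (in the patch the carve path does this inside get_chunk_vs()). A sketch of that fallback order; the helper names below are stand-ins for illustration, not HotSpot functions:

    #include <cstddef>

    struct Chunk;   // opaque for this sketch

    // Hypothetical helpers; each returns NULL on failure.
    Chunk* take_from_free_list(size_t word_size);
    Chunk* carve_from_current_space(size_t word_size);
    Chunk* grow_virtual_space_and_retry(size_t word_size);
    void   note_chunk_in_use(Chunk* c);   // bump the owning node's container count

    Chunk* get_chunk(size_t word_size) {
      // 1) Prefer a recycled chunk of the right size class from the free list.
      Chunk* c = take_from_free_list(word_size);
      // 2) Otherwise carve a fresh chunk out of the current virtual space node.
      if (c == NULL) {
        c = carve_from_current_space(word_size);
      }
      // 3) Otherwise reserve another virtual space and try once more.
      if (c == NULL) {
        c = grow_virtual_space_and_retry(word_size);
      }
      if (c != NULL) {
        note_chunk_in_use(c);   // simplified: done unconditionally here
      }
      return c;
    }
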
+ size_t max_metaspace_size_bytes = MaxMetaspaceSize; + size_t metaspace_size_bytes = MetaspaceSize; if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { return false; @@ -1104,57 +1291,48 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { // If this is part of an allocation after a GC, expand // unconditionally. - if(MetaspaceGC::expand_after_GC()) { + if (MetaspaceGC::expand_after_GC()) { return true; } - size_t metaspace_size_words = MetaspaceSize / BytesPerWord; + // If the capacity is below the minimum capacity, allow the // expansion. Also set the high-water-mark (capacity_until_GC) // to that minimum capacity so that a GC will not be induced // until that minimum capacity is exceeded. - if (vsl->capacity_words_sum() < metaspace_size_words || + if (committed_capacity_bytes < metaspace_size_bytes || capacity_until_GC() == 0) { - set_capacity_until_GC(metaspace_size_words); + set_capacity_until_GC(metaspace_size_bytes); return true; } else { - if (vsl->capacity_words_sum() < capacity_until_GC()) { + if (committed_capacity_bytes < capacity_until_GC()) { return true; } else { if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT " capacity_until_GC " SIZE_FORMAT - " capacity_words_sum " SIZE_FORMAT - " used_words_sum " SIZE_FORMAT - " free chunks " SIZE_FORMAT - " free chunks count %d", + " allocated_capacity_bytes " SIZE_FORMAT, word_size, capacity_until_GC(), - vsl->capacity_words_sum(), - vsl->used_words_sum(), - vsl->chunk_manager()->free_chunks_total(), - vsl->chunk_manager()->free_chunks_count()); + MetaspaceAux::allocated_capacity_bytes()); } return false; } } } -// Variables are in bytes + void MetaspaceGC::compute_new_size() { assert(_shrink_factor <= 100, "invalid shrink factor"); uint current_shrink_factor = _shrink_factor; _shrink_factor = 0; - VirtualSpaceList *vsl = Metaspace::space_list(); - - size_t capacity_after_gc = vsl->capacity_bytes_sum(); - // Check to see if these two can be calculated without walking the CLDG - size_t used_after_gc = vsl->used_bytes_sum(); - size_t capacity_until_GC = vsl->capacity_bytes_sum(); - size_t free_after_gc = capacity_until_GC - used_after_gc; + // Until a faster way of calculating the "used" quantity is implemented, + // use "capacity". 
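
Condensed, the reworked should_expand() above reduces to a few comparisons on byte counts now that it reads the running capacity sum instead of walking the virtual space list. The standalone sketch below restates the policy; committed_bytes, reserved_bytes and the struct fields are stand-ins for MetaspaceAux::allocated_capacity_bytes(), reserved_in_bytes() and the -XX flags, and "0 means no user limit" replaces the FLAG_IS_DEFAULT check:

    #include <cstddef>

    struct ExpandPolicy {
      size_t metaspace_size;       // minimum capacity before a GC is induced (MetaspaceSize)
      size_t max_metaspace_size;   // hard cap; 0 means "no user limit" (MaxMetaspaceSize)
      size_t capacity_until_gc;    // current high-water mark
      bool   expand_after_gc;      // allocation being retried right after a GC
    };

    // Decide whether the metaspace high-water mark may grow to satisfy an allocation.
    bool should_expand(const ExpandPolicy& p, size_t committed_bytes, size_t reserved_bytes) {
      // A user-imposed cap always wins.
      if (p.max_metaspace_size != 0 && reserved_bytes >= p.max_metaspace_size) {
        return false;
      }
      // Right after a GC the allocation is allowed unconditionally.
      if (p.expand_after_gc) {
        return true;
      }
      // Below the minimum capacity: expand (the caller also raises the HWM to that minimum).
      if (committed_bytes < p.metaspace_size || p.capacity_until_gc == 0) {
        return true;
      }
      // Otherwise expand only while still under the high-water mark.
      return committed_bytes < p.capacity_until_gc;
    }
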
+ const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); + const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; const double maximum_used_percentage = 1.0 - minimum_free_percentage; @@ -1167,45 +1345,34 @@ void MetaspaceGC::compute_new_size() { MetaspaceSize); if (PrintGCDetails && Verbose) { - const double free_percentage = ((double)free_after_gc) / capacity_until_GC; gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); gclog_or_tty->print_cr(" " " minimum_free_percentage: %6.2f" " maximum_used_percentage: %6.2f", minimum_free_percentage, maximum_used_percentage); - double d_free_after_gc = free_after_gc / (double) K; gclog_or_tty->print_cr(" " - " free_after_gc : %6.1fK" - " used_after_gc : %6.1fK" - " capacity_after_gc : %6.1fK" - " metaspace HWM : %6.1fK", - free_after_gc / (double) K, - used_after_gc / (double) K, - capacity_after_gc / (double) K, - capacity_until_GC / (double) K); - gclog_or_tty->print_cr(" " - " free_percentage: %6.2f", - free_percentage); + " used_after_gc : %6.1fKB", + used_after_gc / (double) K); } + size_t shrink_bytes = 0; if (capacity_until_GC < minimum_desired_capacity) { // If we have less capacity below the metaspace HWM, then // increment the HWM. size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; // Don't expand unless it's significant if (expand_bytes >= MinMetaspaceExpansion) { - size_t expand_words = expand_bytes / BytesPerWord; - MetaspaceGC::inc_capacity_until_GC(expand_words); + MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); } if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); + size_t new_capacity_until_GC = capacity_until_GC; gclog_or_tty->print_cr(" expanding:" - " minimum_desired_capacity: %6.1fK" - " expand_words: %6.1fK" - " MinMetaspaceExpansion: %6.1fK" - " new metaspace HWM: %6.1fK", + " minimum_desired_capacity: %6.1fKB" + " expand_bytes: %6.1fKB" + " MinMetaspaceExpansion: %6.1fKB" + " new metaspace HWM: %6.1fKB", minimum_desired_capacity / (double) K, expand_bytes / (double) K, MinMetaspaceExpansion / (double) K, @@ -1215,11 +1382,10 @@ void MetaspaceGC::compute_new_size() { } // No expansion, now see if we want to shrink - size_t shrink_words = 0; // We would never want to shrink more than this - size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity; - assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT, - max_shrink_words)); + size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; + assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, + max_shrink_bytes)); // Should shrinking be considered? 
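
The reworked compute_new_size() above and in the next hunk grows the high-water mark when free space would fall below MinMetaspaceFreeRatio and shrinks it, damped over successive calls, when free space exceeds MaxMetaspaceFreeRatio. A small standalone sketch of that arithmetic, with plain size_t/double parameters standing in for the flag machinery and without the tracing:

    #include <cstddef>

    // One step of the grow/shrink decision for the metaspace high-water mark (HWM).
    // 'shrink_factor' starts at 0 and is escalated 0 -> 10 -> 40 -> 100 by the caller
    // on consecutive shrink opportunities, so a single System.gc() barely shrinks.
    // (The patch additionally clamps both desired capacities to at least MetaspaceSize
    // and never shrinks the HWM below it.)
    size_t compute_new_hwm(size_t used_after_gc,
                           size_t hwm,
                           double min_free_ratio,     // e.g. 0.40 for MinMetaspaceFreeRatio=40
                           double max_free_ratio,     // e.g. 0.70 for MaxMetaspaceFreeRatio=70
                           size_t min_expansion,      // MinMetaspaceExpansion, in bytes
                           unsigned shrink_factor) {  // 0, 10, 40 or 100 (percent)
      // Smallest HWM that keeps at least min_free_ratio of it free.
      size_t minimum_desired = (size_t)(used_after_gc / (1.0 - min_free_ratio));
      if (hwm < minimum_desired) {
        size_t expand_bytes = minimum_desired - hwm;
        return (expand_bytes >= min_expansion) ? hwm + expand_bytes : hwm;
      }
      // Largest HWM that keeps at most max_free_ratio of it free.
      size_t maximum_desired = (size_t)(used_after_gc / (1.0 - max_free_ratio));
      if (hwm > maximum_desired) {
        size_t shrink_bytes = (hwm - maximum_desired) / 100 * shrink_factor;  // damped
        if (shrink_bytes >= min_expansion) {
          return hwm - shrink_bytes;
        }
      }
      return hwm;
    }
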
if (MaxMetaspaceFreeRatio < 100) { @@ -1229,17 +1395,15 @@ void MetaspaceGC::compute_new_size() { size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); maximum_desired_capacity = MAX2(maximum_desired_capacity, MetaspaceSize); - if (PrintGC && Verbose) { + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr(" " " maximum_free_percentage: %6.2f" " minimum_used_percentage: %6.2f", maximum_free_percentage, minimum_used_percentage); gclog_or_tty->print_cr(" " - " capacity_until_GC: %6.1fK" - " minimum_desired_capacity: %6.1fK" - " maximum_desired_capacity: %6.1fK", - capacity_until_GC / (double) K, + " minimum_desired_capacity: %6.1fKB" + " maximum_desired_capacity: %6.1fKB", minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); } @@ -1249,17 +1413,17 @@ void MetaspaceGC::compute_new_size() { if (capacity_until_GC > maximum_desired_capacity) { // Capacity too large, compute shrinking size - shrink_words = capacity_until_GC - maximum_desired_capacity; + shrink_bytes = capacity_until_GC - maximum_desired_capacity; // We don't want shrink all the way back to initSize if people call // System.gc(), because some programs do that between "phases" and then // we'd just have to grow the heap up again for the next phase. So we // damp the shrinking: 0% on the first call, 10% on the second call, 40% // on the third call, and 100% by the fourth call. But if we recompute // size without shrinking, it goes back to 0%. - shrink_words = shrink_words / 100 * current_shrink_factor; - assert(shrink_words <= max_shrink_words, + shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + assert(shrink_bytes <= max_shrink_bytes, err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, - shrink_words, max_shrink_words)); + shrink_bytes, max_shrink_bytes)); if (current_shrink_factor == 0) { _shrink_factor = 10; } else { @@ -1273,11 +1437,11 @@ void MetaspaceGC::compute_new_size() { MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); gclog_or_tty->print_cr(" " - " shrink_words: %.1fK" + " shrink_bytes: %.1fK" " current_shrink_factor: %d" " new shrink factor: %d" " MinMetaspaceExpansion: %.1fK", - shrink_words / (double) K, + shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); @@ -1285,23 +1449,11 @@ void MetaspaceGC::compute_new_size() { } } - // Don't shrink unless it's significant - if (shrink_words >= MinMetaspaceExpansion) { - VirtualSpaceNode* csp = vsl->current_virtual_space(); - size_t available_to_shrink = csp->capacity_words_in_vs() - - csp->used_words_in_vs(); - shrink_words = MIN2(shrink_words, available_to_shrink); - csp->shrink_by(shrink_words); - MetaspaceGC::dec_capacity_until_GC(shrink_words); - if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); - gclog_or_tty->print_cr(" metaspace HWM: %.1fK", new_capacity_until_GC / (double) K); - } + if (shrink_bytes >= MinMetaspaceExpansion && + ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { + MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); } - assert(used_after_gc <= vsl->capacity_bytes_sum(), - "sanity check"); - } // Metadebug methods @@ -1380,76 +1532,6 @@ bool Metadebug::test_metadata_failure() { } #endif -// ChunkList methods - -size_t ChunkList::sum_list_size() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->word_size(); - cur = cur->next(); - } - return result; -} - -size_t 
ChunkList::sum_list_count() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result++; - cur = cur->next(); - } - return result; -} - -size_t ChunkList::sum_list_capacity() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->capacity_word_size(); - cur = cur->next(); - } - return result; -} - -void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) { - assert_lock_strong(SpaceManager::expand_lock()); - assert(head == tail || tail->next() == NULL, - "Not the tail or the head has already been added to a list"); - - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print("ChunkList::add_at_head(head, tail): "); - Metachunk* cur = head; - while (cur != NULL) { - gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size()); - cur = cur->next(); - } - gclog_or_tty->print_cr(""); - } - - if (tail != NULL) { - tail->set_next(_head); - } - set_head(head); -} - -void ChunkList::add_at_head(Metachunk* list) { - if (list == NULL) { - // Nothing to add - return; - } - assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* head = list; - Metachunk* tail = list; - Metachunk* cur = head->next(); - // Search for the tail since it is not passed. - while (cur != NULL) { - tail = cur; - cur = cur->next(); - } - add_at_head(head, tail); -} - // ChunkManager methods // Verification of _free_chunks_total and _free_chunks_count does not @@ -1553,7 +1635,7 @@ size_t ChunkManager::sum_free_chunks() { continue; } - result = result + list->sum_list_capacity(); + result = result + list->count() * list->size(); } result = result + humongous_dictionary()->total_size(); return result; @@ -1567,7 +1649,7 @@ size_t ChunkManager::sum_free_chunks_count() { if (list == NULL) { continue; } - count = count + list->sum_list_count(); + count = count + list->count(); } count = count + humongous_dictionary()->total_free_blocks(); return count; @@ -1622,7 +1704,7 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { } // Remove the chunk as the head of the list. - free_list->set_head(chunk->next()); + free_list->remove_chunk(chunk); // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); @@ -1647,9 +1729,6 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { } // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); -#ifdef ASSERT - chunk->set_is_free(false); -#endif } else { return NULL; } @@ -1658,6 +1737,11 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { // Remove it from the links to this freelist chunk->set_next(NULL); chunk->set_prev(NULL); +#ifdef ASSERT + // Chunk is no longer on any freelist. Setting to false make container_count_slow() + // work. 
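
With the ChunkList walking helpers removed above, ChunkManager now leans on uniform per-index free lists (every list holds chunks of exactly one size, so totals are just count * size) plus a dictionary keyed by exact size for humongous chunks. A toy model of that size-segregated layout, using std::vector and std::multimap instead of HotSpot's FreeList and BinaryTreeDictionary types:

    #include <cstddef>
    #include <map>
    #include <vector>

    enum ChunkIndex { SpecializedIndex, SmallIndex, MediumIndex, NumberOfFreeLists };

    struct ToyChunkManager {
      // One free list per fixed size class; every entry on a list has the same size.
      size_t list_chunk_words[NumberOfFreeLists];          // chunk size for each list
      std::vector<void*> free_list[NumberOfFreeLists];     // the free chunks themselves
      // Oversized ("humongous") chunks are kept keyed by their exact size instead.
      std::multimap<size_t, void*> humongous;

      // Total free words: uniform lists need no walking, just count * size.
      size_t total_free_words() const {
        size_t total = 0;
        for (int i = 0; i < NumberOfFreeLists; i++) {
          total += free_list[i].size() * list_chunk_words[i];
        }
        for (std::multimap<size_t, void*>::const_iterator it = humongous.begin();
             it != humongous.end(); ++it) {
          total += it->first;
        }
        return total;
      }
    };
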
+ chunk->set_is_free(false); +#endif slow_locked_verify(); return chunk; } @@ -1679,7 +1763,7 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { size_t list_count; if (list_index(word_size) < HumongousIndex) { ChunkList* list = find_free_chunks_list(word_size); - list_count = list->sum_list_count(); + list_count = list->count(); } else { list_count = humongous_dictionary()->total_count(); } @@ -1772,18 +1856,28 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { } size_t SpaceManager::sum_capacity_in_chunks_in_use() const { - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - size_t sum = 0; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - // Just changed this sum += chunk->capacity_word_size(); - // sum += chunk->word_size() - Metachunk::overhead(); - sum += chunk->capacity_word_size(); - chunk = chunk->next(); + // For CMS use "allocated_chunks_words()" which does not need the + // Metaspace lock. For the other collectors sum over the + // lists. Use both methods as a check that "allocated_chunks_words()" + // is correct. That is, sum_capacity_in_chunks() is too expensive + // to use in the product and allocated_chunks_words() should be used + // but allow for checking that allocated_chunks_words() returns the same + // value as sum_capacity_in_chunks_in_use() which is the definitive + // answer. + if (UseConcMarkSweepGC) { + return allocated_chunks_words(); + } else { + MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); + size_t sum = 0; + for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + Metachunk* chunk = chunks_in_use(i); + while (chunk != NULL) { + sum += chunk->capacity_word_size(); + chunk = chunk->next(); + } } - } return sum; + } } size_t SpaceManager::sum_count_in_chunks_in_use() { @@ -1941,12 +2035,44 @@ void SpaceManager::print_on(outputStream* st) const { SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) : _vs_list(vs_list), - _allocation_total(0), + _allocated_blocks_words(0), + _allocated_chunks_words(0), + _allocated_chunks_count(0), _lock(lock) { initialize(); } +void SpaceManager::inc_size_metrics(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Total of allocated Metachunks and allocated Metachunks count + // for each SpaceManager + _allocated_chunks_words = _allocated_chunks_words + words; + _allocated_chunks_count++; + // Global total of capacity in allocated Metachunks + MetaspaceAux::inc_capacity(words); + // Global total of allocated Metablocks. + // used_words_slow() includes the overhead in each + // Metachunk so include it in the used when the + // Metachunk is first added (so only added once per + // Metachunk). 
+ MetaspaceAux::inc_used(Metachunk::overhead()); +} + +void SpaceManager::inc_used_metrics(size_t words) { + // Add to the per SpaceManager total + Atomic::add_ptr(words, &_allocated_blocks_words); + // Add to the global total + MetaspaceAux::inc_used(words); +} + +void SpaceManager::dec_total_from_size_metrics() { + MetaspaceAux::dec_capacity(allocated_chunks_words()); + MetaspaceAux::dec_used(allocated_blocks_words()); + // Also deduct the overhead per Metachunk + MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead()); +} + void SpaceManager::initialize() { Metadebug::init_allocation_fail_alot_count(); for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { @@ -1958,9 +2084,37 @@ void SpaceManager::initialize() { } } +void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { + if (chunks == NULL) { + return; + } + ChunkList* list = free_chunks(index); + assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); + assert_lock_strong(SpaceManager::expand_lock()); + Metachunk* cur = chunks; + + // This returns chunks one at a time. If a new + // class List can be created that is a base class + // of FreeList then something like FreeList::prepend() + // can be used in place of this loop + while (cur != NULL) { + assert(cur->container() != NULL, "Container should have been set"); + cur->container()->dec_container_count(); + // Capture the next link before it is changed + // by the call to return_chunk_at_head(); + Metachunk* next = cur->next(); + cur->set_is_free(true); + list->return_chunk_at_head(cur); + cur = next; + } +} + SpaceManager::~SpaceManager() { // This call this->_lock which can't be done while holding expand_lock() - const size_t in_use_before = sum_capacity_in_chunks_in_use(); + assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), + err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT + " allocated_chunks_words() " SIZE_FORMAT, + sum_capacity_in_chunks_in_use(), allocated_chunks_words())); MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); @@ -1969,17 +2123,19 @@ SpaceManager::~SpaceManager() { chunk_manager->slow_locked_verify(); + dec_total_from_size_metrics(); + if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this); locked_print_chunks_in_use_on(gclog_or_tty); } - // Mangle freed memory. - NOT_PRODUCT(mangle_freed_chunks();) + // Do not mangle freed Metachunks. The chunk size inside Metachunks + // is during the freeing of a VirtualSpaceNodes. // Have to update before the chunks_in_use lists are emptied // below. 
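
The inc_size_metrics()/inc_used_metrics()/dec_total_from_size_metrics() trio above keeps two running sums per SpaceManager (chunk words and block words) mirrored into global MetaspaceAux totals, so the expensive class-loader-data-graph walks become a debug-only cross-check; note that the per-chunk overhead is charged to "used" exactly once, when the chunk is added. A simplified sketch of that bookkeeping, with std::atomic standing in for the Atomic::add_ptr/expand_lock mix used in the patch and toy field names throughout:

    #include <atomic>
    #include <cstddef>

    // Global running sums (the patch's MetaspaceAux::_allocated_capacity_words and
    // _allocated_used_words; plain std::atomic here for the sketch).
    std::atomic<size_t> g_capacity_words(0);
    std::atomic<size_t> g_used_words(0);

    struct ToySpaceManager {
      size_t chunk_words;     // capacity of all chunks handed to this manager
      size_t chunk_count;
      size_t block_words;     // words actually handed out to callers
      size_t chunk_overhead;  // per-chunk header size, charged to "used" once per chunk

      // Called once when a chunk is added to the in-use list.
      void add_chunk(size_t words) {
        chunk_words += words;
        chunk_count += 1;
        g_capacity_words += words;
        g_used_words += chunk_overhead;          // the header is "used" from the start
      }
      // Called on every block allocation; loaders allocate concurrently, hence atomics.
      void add_block(size_t words) {
        block_words += words;
        g_used_words += words;
      }
      // Called when the manager dies: give everything back to the global sums.
      void retire() {
        g_capacity_words -= chunk_words;
        g_used_words -= block_words + chunk_count * chunk_overhead;
      }
    };
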
- chunk_manager->inc_free_chunks_total(in_use_before, + chunk_manager->inc_free_chunks_total(allocated_chunks_words(), sum_count_in_chunks_in_use()); // Add all the chunks in use by this space manager @@ -1995,11 +2151,11 @@ SpaceManager::~SpaceManager() { chunk_size_name(i)); } Metachunk* chunks = chunks_in_use(i); - chunk_manager->free_chunks(i)->add_at_head(chunks); + chunk_manager->return_chunks(i, chunks); set_chunks_in_use(i, NULL); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("updated freelist count %d %s", - chunk_manager->free_chunks(i)->sum_list_count(), + chunk_manager->free_chunks(i)->count(), chunk_size_name(i)); } assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); @@ -2035,6 +2191,7 @@ SpaceManager::~SpaceManager() { " granularity %d", humongous_chunks->word_size(), HumongousChunkGranularity)); Metachunk* next_humongous_chunks = humongous_chunks->next(); + humongous_chunks->container()->dec_container_count(); chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks); humongous_chunks = next_humongous_chunks; } @@ -2044,7 +2201,6 @@ SpaceManager::~SpaceManager() { chunk_manager->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); } - set_chunks_in_use(HumongousIndex, NULL); chunk_manager->slow_locked_verify(); } @@ -2124,12 +2280,17 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); } + // Add to the running sum of capacity + inc_size_metrics(new_chunk->word_size()); + assert(new_chunk->is_empty(), "Not ready for reuse"); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print("SpaceManager::add_chunk: %d) ", sum_count_in_chunks_in_use()); new_chunk->print_on(gclog_or_tty); - vs_list()->chunk_manager()->locked_print_free_chunks(tty); + if (vs_list() != NULL) { + vs_list()->chunk_manager()->locked_print_free_chunks(tty); + } } } @@ -2200,7 +2361,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) { // of memory if this returns null. if (DumpSharedSpaces) { assert(current_chunk() != NULL, "should never happen"); - inc_allocation_total(word_size); + inc_used_metrics(word_size); return current_chunk()->allocate(word_size); // caller handles null result } if (current_chunk() != NULL) { @@ -2211,7 +2372,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) { result = grow_and_allocate(word_size); } if (result > 0) { - inc_allocation_total(word_size); + inc_used_metrics(word_size); assert(result != (MetaWord*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); } @@ -2245,20 +2406,14 @@ void SpaceManager::verify_chunk_size(Metachunk* chunk) { } #ifdef ASSERT -void SpaceManager::verify_allocation_total() { +void SpaceManager::verify_allocated_blocks_words() { // Verification is only guaranteed at a safepoint. 
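
ChunkManager::return_chunks() above walks a singly linked chain of chunks that the dying SpaceManager is handing back, and it must capture each chunk's successor before pushing the chunk onto the free list, because pushing rewrites the next pointer. A minimal sketch of that loop over a hypothetical Chunk type (the container counter is flattened to a bare int* for brevity):

    #include <cstddef>

    struct Chunk {
      Chunk* next;
      bool   is_free;
      int*   container_count;   // owning virtual space node's in-use chunk counter
    };

    // Push every chunk on 'chain' onto the free list 'free_list_head', one at a time.
    void return_chunks(Chunk*& free_list_head, Chunk* chain) {
      Chunk* cur = chain;
      while (cur != NULL) {
        Chunk* next = cur->next;        // capture before the links are rewritten
        (*cur->container_count)--;      // the owning node loses a container
        cur->is_free = true;            // so debug walks skip it when counting containers
        cur->next = free_list_head;     // push at the head of the free list
        free_list_head = cur;
        cur = next;
      }
    }
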
- if (SafepointSynchronize::is_at_safepoint()) { - gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT - " sum_used_in_chunks_in_use " SIZE_FORMAT, - this, - allocation_total(), - sum_used_in_chunks_in_use()); - } - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - assert(allocation_total() == sum_used_in_chunks_in_use(), + assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), + "Verification can fail if the applications is running"); + assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), err_msg("allocation total is not consistent " SIZE_FORMAT " vs " SIZE_FORMAT, - allocation_total(), sum_used_in_chunks_in_use())); + allocated_blocks_words(), sum_used_in_chunks_in_use())); } #endif @@ -2314,14 +2469,65 @@ void SpaceManager::mangle_freed_chunks() { // MetaspaceAux -size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) { + +size_t MetaspaceAux::_allocated_capacity_words = 0; +size_t MetaspaceAux::_allocated_used_words = 0; + +size_t MetaspaceAux::free_bytes() { + size_t result = 0; + if (Metaspace::class_space_list() != NULL) { + result = result + Metaspace::class_space_list()->free_bytes(); + } + if (Metaspace::space_list() != NULL) { + result = result + Metaspace::space_list()->free_bytes(); + } + return result; +} + +void MetaspaceAux::dec_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + assert(words <= _allocated_capacity_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_capacity_words " SIZE_FORMAT, + words, _allocated_capacity_words)); + _allocated_capacity_words = _allocated_capacity_words - words; +} + +void MetaspaceAux::inc_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Needs to be atomic + _allocated_capacity_words = _allocated_capacity_words + words; +} + +void MetaspaceAux::dec_used(size_t words) { + assert(words <= _allocated_used_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_used_words " SIZE_FORMAT, + words, _allocated_used_words)); + // For CMS deallocation of the Metaspaces occurs during the + // sweep which is a concurrent phase. Protection by the expand_lock() + // is not enough since allocation is on a per Metaspace basis + // and protected by the Metaspace lock. + jlong minus_words = (jlong) - (jlong) words; + Atomic::add_ptr(minus_words, &_allocated_used_words); +} + +void MetaspaceAux::inc_used(size_t words) { + // _allocated_used_words tracks allocations for + // each piece of metadata. Those allocations are + // generally done concurrently by different application + // threads so must be done atomically. 
+ Atomic::add_ptr(words, &_allocated_used_words); +} + +size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { size_t used = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); - // Sum allocation_total for each metaspace + // Sum allocated_blocks_words for each metaspace if (msp != NULL) { - used += msp->used_words(mdtype); + used += msp->used_words_slow(mdtype); } } return used * BytesPerWord; @@ -2339,13 +2545,15 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) { return free * BytesPerWord; } -size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) { - size_t capacity = free_chunks_total(mdtype); +size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { + // Don't count the space in the freelists. That space will be + // added to the capacity calculation as needed. + size_t capacity = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); if (msp != NULL) { - capacity += msp->capacity_words(mdtype); + capacity += msp->capacity_words_slow(mdtype); } } return capacity * BytesPerWord; @@ -2372,23 +2580,30 @@ size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) return free_chunks_total(mdtype) * BytesPerWord; } +size_t MetaspaceAux::free_chunks_total() { + return free_chunks_total(Metaspace::ClassType) + + free_chunks_total(Metaspace::NonClassType); +} + +size_t MetaspaceAux::free_chunks_total_in_bytes() { + return free_chunks_total() * BytesPerWord; +} + void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { gclog_or_tty->print(", [Metaspace:"); if (PrintGCDetails && Verbose) { gclog_or_tty->print(" " SIZE_FORMAT "->" SIZE_FORMAT - "(" SIZE_FORMAT "/" SIZE_FORMAT ")", + "(" SIZE_FORMAT ")", prev_metadata_used, - used_in_bytes(), - capacity_in_bytes(), + allocated_capacity_bytes(), reserved_in_bytes()); } else { gclog_or_tty->print(" " SIZE_FORMAT "K" "->" SIZE_FORMAT "K" - "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)", + "(" SIZE_FORMAT "K)", prev_metadata_used / K, - used_in_bytes()/ K, - capacity_in_bytes()/K, + allocated_capacity_bytes() / K, reserved_in_bytes()/ K); } @@ -2403,23 +2618,30 @@ void MetaspaceAux::print_on(outputStream* out) { out->print_cr(" Metaspace total " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K); - out->print_cr(" data space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K); - out->print_cr(" class space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K); + allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K); +#if 0 +// The calls to capacity_bytes_slow() and used_bytes_slow() cause +// lock ordering assertion failures with some collectors. Do +// not include this code until the lock ordering is fixed. 
+ if (PrintGCDetails && Verbose) { + out->print_cr(" data space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K); + out->print_cr(" class space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K); + } +#endif } // Print information for class space and data space separately. // This is almost the same as above. void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype); - size_t capacity_bytes = capacity_in_bytes(mdtype); - size_t used_bytes = used_in_bytes(mdtype); + size_t capacity_bytes = capacity_bytes_slow(mdtype); + size_t used_bytes = used_bytes_slow(mdtype); size_t free_bytes = free_in_bytes(mdtype); size_t used_and_free = used_bytes + free_bytes + free_chunks_capacity_bytes; @@ -2492,6 +2714,36 @@ void MetaspaceAux::verify_free_chunks() { Metaspace::class_space_list()->chunk_manager()->verify(); } +void MetaspaceAux::verify_capacity() { +#ifdef ASSERT + size_t running_sum_capacity_bytes = allocated_capacity_bytes(); + // For purposes of the running sum of used, verify against capacity + size_t capacity_in_use_bytes = capacity_bytes_slow(); + assert(running_sum_capacity_bytes == capacity_in_use_bytes, + err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT + " capacity_bytes_slow()" SIZE_FORMAT, + running_sum_capacity_bytes, capacity_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_used() { +#ifdef ASSERT + size_t running_sum_used_bytes = allocated_used_bytes(); + // For purposes of the running sum of used, verify against capacity + size_t used_in_use_bytes = used_bytes_slow(); + assert(allocated_used_bytes() == used_in_use_bytes, + err_msg("allocated_used_bytes() " SIZE_FORMAT + " used_bytes_slow()()" SIZE_FORMAT, + allocated_used_bytes(), used_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_metrics() { + verify_capacity(); + verify_used(); +} + + // Metaspace methods size_t Metaspace::_first_chunk_word_size = 0; @@ -2641,8 +2893,8 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) MetaWord* result; MetaspaceGC::set_expand_after_GC(true); size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size); - MetaspaceGC::inc_capacity_until_GC(delta_words); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; + MetaspaceGC::inc_capacity_until_GC(delta_bytes); if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); @@ -2660,8 +2912,8 @@ char* Metaspace::bottom() const { return (char*)vsm()->current_chunk()->bottom(); } -size_t Metaspace::used_words(MetadataType mdtype) const { - // return vsm()->allocation_total(); +size_t Metaspace::used_words_slow(MetadataType mdtype) const { + // return vsm()->allocated_used_words(); return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() : vsm()->sum_used_in_chunks_in_use(); // includes overhead! } @@ -2676,16 +2928,24 @@ size_t Metaspace::free_words(MetadataType mdtype) const { // have been made. Don't include space in the global freelist and // in the space available in the dictionary which // is already counted in some chunk. 
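
verify_capacity() and verify_used() above exist purely to cross-check the cheap running sums against the slow walk over every loader's metaspace: fast counter in product, slow recomputation asserted equal in debug builds. A tiny illustration of the pattern; the vector and field names are stand-ins, not the real ClassLoaderDataGraph iterator:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ToyMetaspace { size_t capacity_words; };

    std::vector<ToyMetaspace*> g_all_metaspaces;   // stand-in for the class loader data graph
    size_t g_running_capacity_words = 0;           // kept up to date on every chunk add/remove

    // Recompute the total the slow way and compare it with the running sum.
    // (The patch guards this with #ifdef ASSERT; plain assert() stands in here.)
    void verify_capacity() {
      size_t slow_sum = 0;
      for (size_t i = 0; i < g_all_metaspaces.size(); i++) {
        slow_sum += g_all_metaspaces[i]->capacity_words;
      }
      assert(slow_sum == g_running_capacity_words && "running capacity sum out of sync");
    }
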
-size_t Metaspace::capacity_words(MetadataType mdtype) const { +size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() : vsm()->sum_capacity_in_chunks_in_use(); } +size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { + return used_words_slow(mdtype) * BytesPerWord; +} + +size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { + return capacity_words_slow(mdtype) * BytesPerWord; +} + void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { if (SafepointSynchronize::is_at_safepoint()) { assert(Thread::current()->is_VM_thread(), "should be the VM thread"); // Don't take Heap_lock - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk ::min_size()) { // Dark matter. Too small for dictionary. #ifdef ASSERT @@ -2699,7 +2959,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { vsm()->deallocate(ptr, word_size); } } else { - MutexLocker ml(vsm()->lock()); + MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); if (word_size < TreeChunk ::min_size()) { // Dark matter. Too small for dictionary. @@ -2773,6 +3033,13 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, return Metablock::initialize(result, word_size); } +void Metaspace::purge() { + MutexLockerEx cl(SpaceManager::expand_lock(), + Mutex::_no_safepoint_check_flag); + space_list()->purge(); + class_space_list()->purge(); +} + void Metaspace::print_on(outputStream* out) const { // Print both class virtual space counts and metaspace. if (Verbose) { @@ -2790,7 +3057,8 @@ bool Metaspace::contains(const void * ptr) { // aren't deleted presently. When they are, some sort of locking might // be needed. Note, locking this can cause inversion problems with the // caller in MetaspaceObj::is_metadata() function. - return space_list()->contains(ptr) || class_space_list()->contains(ptr); + return space_list()->contains(ptr) || + class_space_list()->contains(ptr); } void Metaspace::verify() { @@ -2799,10 +3067,6 @@ void Metaspace::verify() { } void Metaspace::dump(outputStream* const out) const { - if (UseMallocOnly) { - // Just print usage for now - out->print_cr("usage %d", used_words(Metaspace::NonClassType)); - } out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); vsm()->dump(out); out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); diff --git a/hotspot/src/share/vm/memory/metaspace.hpp b/hotspot/src/share/vm/memory/metaspace.hpp index f704804795f..1108c79feab 100644 --- a/hotspot/src/share/vm/memory/metaspace.hpp +++ b/hotspot/src/share/vm/memory/metaspace.hpp @@ -111,6 +111,10 @@ class Metaspace : public CHeapObj { SpaceManager* _class_vsm; SpaceManager* class_vsm() const { return _class_vsm; } + // Allocate space for metadata of type mdtype. This is space + // within a Metachunk and is used by + // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) + // which returns a Metablock. 
MetaWord* allocate(size_t word_size, MetadataType mdtype); // Virtual Space lists for both classes and other metadata @@ -133,11 +137,14 @@ class Metaspace : public CHeapObj { static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } char* bottom() const; - size_t used_words(MetadataType mdtype) const; + size_t used_words_slow(MetadataType mdtype) const; size_t free_words(MetadataType mdtype) const; - size_t capacity_words(MetadataType mdtype) const; + size_t capacity_words_slow(MetadataType mdtype) const; size_t waste_words(MetadataType mdtype) const; + size_t used_bytes_slow(MetadataType mdtype) const; + size_t capacity_bytes_slow(MetadataType mdtype) const; + static Metablock* allocate(ClassLoaderData* loader_data, size_t size, bool read_only, MetadataType mdtype, TRAPS); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); @@ -150,6 +157,9 @@ class Metaspace : public CHeapObj { static bool contains(const void *ptr); void dump(outputStream* const out) const; + // Free empty virtualspaces + static void purge(); + void print_on(outputStream* st) const; // Debugging support void verify(); @@ -158,28 +168,81 @@ class Metaspace : public CHeapObj { class MetaspaceAux : AllStatic { // Statistics for class space and data space in metaspace. - static size_t used_in_bytes(Metaspace::MetadataType mdtype); + + // These methods iterate over the classloader data graph + // for the given Metaspace type. These are slow. + static size_t used_bytes_slow(Metaspace::MetadataType mdtype); static size_t free_in_bytes(Metaspace::MetadataType mdtype); - static size_t capacity_in_bytes(Metaspace::MetadataType mdtype); + static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); + + // Iterates over the virtual space list. static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); static size_t free_chunks_total(Metaspace::MetadataType mdtype); static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); public: - // Total of space allocated to metadata in all Metaspaces - static size_t used_in_bytes() { - return used_in_bytes(Metaspace::ClassType) + - used_in_bytes(Metaspace::NonClassType); + // Running sum of space in all Metachunks that has been + // allocated to a Metaspace. This is used instead of + // iterating over all the classloaders + static size_t _allocated_capacity_words; + // Running sum of space in all Metachunks that have + // are being used for metadata. + static size_t _allocated_used_words; + + public: + // Decrement and increment _allocated_capacity_words + static void dec_capacity(size_t words); + static void inc_capacity(size_t words); + + // Decrement and increment _allocated_used_words + static void dec_used(size_t words); + static void inc_used(size_t words); + + // Total of space allocated to metadata in all Metaspaces. + // This sums the space used in each Metachunk by + // iterating over the classloader data graph + static size_t used_bytes_slow() { + return used_bytes_slow(Metaspace::ClassType) + + used_bytes_slow(Metaspace::NonClassType); } - // Total of available space in all Metaspaces - // Total of capacity allocated to all Metaspaces. This includes - // space in Metachunks not yet allocated and in the Metachunk - // freelist. 
- static size_t capacity_in_bytes() { - return capacity_in_bytes(Metaspace::ClassType) + - capacity_in_bytes(Metaspace::NonClassType); + // Used by MetaspaceCounters + static size_t free_chunks_total(); + static size_t free_chunks_total_in_bytes(); + + static size_t allocated_capacity_words() { + return _allocated_capacity_words; + } + static size_t allocated_capacity_bytes() { + return _allocated_capacity_words * BytesPerWord; + } + + static size_t allocated_used_words() { + return _allocated_used_words; + } + static size_t allocated_used_bytes() { + return _allocated_used_words * BytesPerWord; + } + + static size_t free_bytes(); + + // Total capacity in all Metaspaces + static size_t capacity_bytes_slow() { +#ifdef PRODUCT + // Use allocated_capacity_bytes() in PRODUCT instead of this function. + guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); +#endif + size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); + size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); + assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, + err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT + " class_capacity + non_class_capacity " SIZE_FORMAT + " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, + allocated_capacity_bytes(), class_capacity + non_class_capacity, + class_capacity, non_class_capacity)); + + return class_capacity + non_class_capacity; } // Total space reserved in all Metaspaces @@ -198,6 +261,11 @@ class MetaspaceAux : AllStatic { static void print_waste(outputStream* out); static void dump(outputStream* out); static void verify_free_chunks(); + // Checks that the values returned by allocated_capacity_bytes() and + // capacity_bytes_slow() are the same. + static void verify_capacity(); + static void verify_used(); + static void verify_metrics(); }; // Metaspace are deallocated when their class loader are GC'ed. @@ -232,7 +300,6 @@ class MetaspaceGC : AllStatic { public: static size_t capacity_until_GC() { return _capacity_until_GC; } - static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; } static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } static void dec_capacity_until_GC(size_t v) { _capacity_until_GC = _capacity_until_GC > v ? 
_capacity_until_GC - v : 0; diff --git a/hotspot/src/share/vm/memory/metaspaceCounters.cpp b/hotspot/src/share/vm/memory/metaspaceCounters.cpp index dc2f4f733aa..b2be29bca2f 100644 --- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp +++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp @@ -29,6 +29,16 @@ MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL; +size_t MetaspaceCounters::calc_total_capacity() { + // The total capacity is the sum of + // 1) capacity of Metachunks in use by all Metaspaces + // 2) unused space at the end of each Metachunk + // 3) space in the freelist + size_t total_capacity = MetaspaceAux::allocated_capacity_bytes() + + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes(); + return total_capacity; +} + MetaspaceCounters::MetaspaceCounters() : _capacity(NULL), _used(NULL), @@ -36,8 +46,8 @@ MetaspaceCounters::MetaspaceCounters() : if (UsePerfData) { size_t min_capacity = MetaspaceAux::min_chunk_size(); size_t max_capacity = MetaspaceAux::reserved_in_bytes(); - size_t curr_capacity = MetaspaceAux::capacity_in_bytes(); - size_t used = MetaspaceAux::used_in_bytes(); + size_t curr_capacity = calc_total_capacity(); + size_t used = MetaspaceAux::allocated_used_bytes(); initialize(min_capacity, max_capacity, curr_capacity, used); } @@ -82,15 +92,13 @@ void MetaspaceCounters::initialize(size_t min_capacity, void MetaspaceCounters::update_capacity() { assert(UsePerfData, "Should not be called unless being used"); - assert(_capacity != NULL, "Should be initialized"); - size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes(); - _capacity->set_value(capacity_in_bytes); + size_t total_capacity = calc_total_capacity(); + _capacity->set_value(total_capacity); } void MetaspaceCounters::update_used() { assert(UsePerfData, "Should not be called unless being used"); - assert(_used != NULL, "Should be initialized"); - size_t used_in_bytes = MetaspaceAux::used_in_bytes(); + size_t used_in_bytes = MetaspaceAux::allocated_used_bytes(); _used->set_value(used_in_bytes); } diff --git a/hotspot/src/share/vm/memory/metaspaceCounters.hpp b/hotspot/src/share/vm/memory/metaspaceCounters.hpp index 4b6de646b60..46a9308888a 100644 --- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp +++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp @@ -37,6 +37,7 @@ class MetaspaceCounters: public CHeapObj { size_t max_capacity, size_t curr_capacity, size_t used); + size_t calc_total_capacity(); public: MetaspaceCounters(); ~MetaspaceCounters(); diff --git a/hotspot/src/share/vm/memory/metaspaceShared.cpp b/hotspot/src/share/vm/memory/metaspaceShared.cpp index 4f53114c6cd..5f0f152e975 100644 --- a/hotspot/src/share/vm/memory/metaspaceShared.cpp +++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp @@ -376,18 +376,17 @@ void VM_PopulateDumpSharedSpace::doit() { const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT; Metaspace* ro_space = _loader_data->ro_metaspace(); Metaspace* rw_space = _loader_data->rw_metaspace(); - const size_t BPW = BytesPerWord; // Allocated size of each space (may not be all occupied) - const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW; - const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW; + const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType); + const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType); const size_t md_alloced = md_end-md_low; const size_t mc_alloced = 
mc_end-mc_low; const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced; // Occupied size of each space. - const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW; - const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW; + const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType); + const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType); const size_t md_bytes = size_t(md_top - md_low); const size_t mc_bytes = size_t(mc_top - mc_low); diff --git a/hotspot/src/share/vm/memory/sharedHeap.cpp b/hotspot/src/share/vm/memory/sharedHeap.cpp index caef7ac7ad0..cd577d4b57e 100644 --- a/hotspot/src/share/vm/memory/sharedHeap.cpp +++ b/hotspot/src/share/vm/memory/sharedHeap.cpp @@ -218,14 +218,13 @@ public: static AlwaysTrueClosure always_true; void SharedHeap::process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { + CodeBlobClosure* code_roots) { // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, root_closure); CodeCache::blobs_do(code_roots); - StringTable::oops_do(root_closure); - } + StringTable::oops_do(root_closure); +} void SharedHeap::set_barrier_set(BarrierSet* bs) { _barrier_set = bs; diff --git a/hotspot/src/share/vm/memory/sharedHeap.hpp b/hotspot/src/share/vm/memory/sharedHeap.hpp index 2f8e2d910c2..b13bf15b846 100644 --- a/hotspot/src/share/vm/memory/sharedHeap.hpp +++ b/hotspot/src/share/vm/memory/sharedHeap.hpp @@ -249,8 +249,7 @@ public: // JNI weak roots, the code cache, system dictionary, symbol table, // string table. void process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // The functions below are helper functions that a subclass of // "SharedHeap" can use in the implementation of its virtual diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp index 90a2276cb93..a0849df9914 100644 --- a/hotspot/src/share/vm/memory/universe.cpp +++ b/hotspot/src/share/vm/memory/universe.cpp @@ -1270,7 +1270,7 @@ void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) { st->print_cr("}"); } -void Universe::verify(bool silent, VerifyOption option) { +void Universe::verify(VerifyOption option, const char* prefix, bool silent) { // The use of _verify_in_progress is a temporary work around for // 6320749. 
Don't bother with a creating a class to set and clear // it since it is only used in this method and the control flow is @@ -1287,11 +1287,12 @@ void Universe::verify(bool silent, VerifyOption option) { HandleMark hm; // Handles created during verification can be zapped _verify_count++; + if (!silent) gclog_or_tty->print(prefix); if (!silent) gclog_or_tty->print("[Verifying "); if (!silent) gclog_or_tty->print("threads "); Threads::verify(); + if (!silent) gclog_or_tty->print("heap "); heap()->verify(silent, option); - if (!silent) gclog_or_tty->print("syms "); SymbolTable::verify(); if (!silent) gclog_or_tty->print("strs "); diff --git a/hotspot/src/share/vm/memory/universe.hpp b/hotspot/src/share/vm/memory/universe.hpp index 2bf0b653f58..48d32f71ed6 100644 --- a/hotspot/src/share/vm/memory/universe.hpp +++ b/hotspot/src/share/vm/memory/universe.hpp @@ -445,12 +445,12 @@ class Universe: AllStatic { // Debugging static bool verify_in_progress() { return _verify_in_progress; } - static void verify(bool silent, VerifyOption option); - static void verify(bool silent) { - verify(silent, VerifyOption_Default /* option */); + static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently); + static void verify(const char* prefix, bool silent = VerifySilently) { + verify(VerifyOption_Default, prefix, silent); } - static void verify() { - verify(false /* silent */); + static void verify(bool silent = VerifySilently) { + verify("", silent); } static int verify_count() { return _verify_count; } diff --git a/hotspot/src/share/vm/oops/constantPool.cpp b/hotspot/src/share/vm/oops/constantPool.cpp index 9045db75834..5c5ae945bda 100644 --- a/hotspot/src/share/vm/oops/constantPool.cpp +++ b/hotspot/src/share/vm/oops/constantPool.cpp @@ -40,6 +40,7 @@ #include "runtime/init.hpp" #include "runtime/javaCalls.hpp" #include "runtime/signature.hpp" +#include "runtime/synchronizer.hpp" #include "runtime/vframe.hpp" ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) { @@ -69,7 +70,6 @@ ConstantPool::ConstantPool(Array * tags) { // only set to non-zero if constant pool is merged by RedefineClasses set_version(0); - set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); // initialize tag array int length = tags->length(); @@ -95,9 +95,6 @@ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) { void ConstantPool::release_C_heap_structures() { // walk constant pool and decrement symbol reference counts unreference_symbols(); - - delete _lock; - set_lock(NULL); } objArrayOop ConstantPool::resolved_references() const { @@ -154,9 +151,6 @@ void ConstantPool::restore_unshareable_info(TRAPS) { ClassLoaderData* loader_data = pool_holder()->class_loader_data(); set_resolved_references(loader_data->add_handle(refs_handle)); } - - // Also need to recreate the mutex. Make sure this matches the constructor - set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); } } @@ -167,7 +161,23 @@ void ConstantPool::remove_unshareable_info() { set_resolved_reference_length( resolved_references() != NULL ? resolved_references()->length() : 0); set_resolved_references(NULL); - set_lock(NULL); +} + +oop ConstantPool::lock() { + if (_pool_holder) { + // We re-use the _pool_holder's init_lock to reduce footprint. + // Notes on deadlocks: + // [1] This lock is a Java oop, so it can be recursively locked by + // the same thread without self-deadlocks. 
+ // [2] Deadlock will happen if there is circular dependency between + // the of two Java classes. However, in this case, + // the deadlock would have happened long before we reach + // ConstantPool::lock(), so reusing init_lock does not + // increase the possibility of deadlock. + return _pool_holder->init_lock(); + } else { + return NULL; + } } int ConstantPool::cp_to_object_index(int cp_index) { @@ -208,7 +218,9 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS Symbol* name = NULL; Handle loader; - { MonitorLockerEx ml(this_oop->lock()); + { + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock , THREAD, cplock != NULL); if (this_oop->tag_at(which).is_unresolved_klass()) { if (this_oop->tag_at(which).is_unresolved_klass_in_error()) { @@ -255,7 +267,8 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS bool throw_orig_error = false; { - MonitorLockerEx ml(this_oop->lock()); + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // some other thread has beaten us and has resolved the class. if (this_oop->tag_at(which).is_klass()) { @@ -323,7 +336,8 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS } return k(); } else { - MonitorLockerEx ml(this_oop->lock()); + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // Only updated constant pool - if it is resolved. do_resolve = this_oop->tag_at(which).is_unresolved_klass(); if (do_resolve) { @@ -619,7 +633,8 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int whi int tag, TRAPS) { ResourceMark rm; Symbol* error = PENDING_EXCEPTION->klass()->name(); - MonitorLockerEx ml(this_oop->lock()); // lock cpool to change tag. + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // lock cpool to change tag. int error_tag = (tag == JVM_CONSTANT_MethodHandle) ? JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError; @@ -780,7 +795,8 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde if (cache_index >= 0) { // Cache the oop here also. Handle result_handle(THREAD, result_oop); - MonitorLockerEx ml(this_oop->lock()); // don't know if we really need this + oop cplock = this_oop->lock(); + ObjectLocker ol(cplock, THREAD, cplock != NULL); // don't know if we really need this oop result = this_oop->resolved_references()->obj_at(cache_index); // Benign race condition: resolved_references may already be filled in while we were trying to lock. // The important thing here is that all threads pick up the same result. 
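
The constant pool hunks above drop the per-pool C-heap Monitor and instead borrow the pool holder's init lock, a Java object, which both saves footprint and allows recursive locking by the same thread; because the holder may not be set yet, lock() can return NULL, and the ObjectLocker call sites pass "cplock != NULL" so locking is simply skipped in that case. A standalone sketch of that conditional-RAII shape, using std::recursive_mutex and toy types rather than oops, Handles, or the real ObjectLocker:

    #include <mutex>

    // Conditional RAII locker: mirrors passing "cplock != NULL" as the third
    // ObjectLocker argument, so a pool without a holder (and thus without a lock
    // object) simply skips locking.
    class MaybeLocker {
      std::recursive_mutex* _m;   // recursive, like a Java object monitor
     public:
      MaybeLocker(std::recursive_mutex* m, bool do_lock) : _m(do_lock ? m : nullptr) {
        if (_m != nullptr) _m->lock();
      }
      ~MaybeLocker() {
        if (_m != nullptr) _m->unlock();
      }
    };

    struct ToyHolderKlass { std::recursive_mutex init_lock; };

    struct ToyConstantPool {
      ToyHolderKlass* pool_holder;      // may still be unset early in class loading
      // Instead of owning a private lock, borrow the holder's init lock.
      std::recursive_mutex* lock() {
        return pool_holder != nullptr ? &pool_holder->init_lock : nullptr;
      }
      void resolve_entry_locked() {
        std::recursive_mutex* l = lock();
        MaybeLocker guard(l, l != nullptr);
        // ... mutate tags / resolved entries under the lock ...
      }
    };
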
@@ -1043,24 +1059,13 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2, case JVM_CONSTANT_InvokeDynamic: { - int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1); - int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2); - bool match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - k1 = invoke_dynamic_name_and_type_ref_index_at(index1); - k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2); - match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - int argc = invoke_dynamic_argument_count_at(index1); - if (argc == cp2->invoke_dynamic_argument_count_at(index2)) { - for (int j = 0; j < argc; j++) { - k1 = invoke_dynamic_argument_index_at(index1, j); - k2 = cp2->invoke_dynamic_argument_index_at(index2, j); - match = compare_entry_to(k1, cp2, k2, CHECK_false); - if (!match) return false; - } - return true; // got through loop; all elements equal - } + int k1 = invoke_dynamic_name_and_type_ref_index_at(index1); + int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2); + int i1 = invoke_dynamic_bootstrap_specifier_index(index1); + int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2); + bool match = compare_entry_to(k1, cp2, k2, CHECK_false) && + compare_operand_to(i1, cp2, i2, CHECK_false); + return match; } break; case JVM_CONSTANT_String: @@ -1095,6 +1100,80 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2, } // end compare_entry_to() +// Resize the operands array with delta_len and delta_size. +// Used in RedefineClasses for CP merge. +void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) { + int old_len = operand_array_length(operands()); + int new_len = old_len + delta_len; + int min_len = (delta_len > 0) ? old_len : new_len; + + int old_size = operands()->length(); + int new_size = old_size + delta_size; + int min_size = (delta_size > 0) ? old_size : new_size; + + ClassLoaderData* loader_data = pool_holder()->class_loader_data(); + Array * new_ops = MetadataFactory::new_array (loader_data, new_size, CHECK); + + // Set index in the resized array for existing elements only + for (int idx = 0; idx < min_len; idx++) { + int offset = operand_offset_at(idx); // offset in original array + operand_offset_at_put(new_ops, idx, offset + 2*delta_len); // offset in resized array + } + // Copy the bootstrap specifiers only + Copy::conjoint_memory_atomic(operands()->adr_at(2*old_len), + new_ops->adr_at(2*new_len), + (min_size - 2*min_len) * sizeof(u2)); + // Explicitly deallocate old operands array. + // Note, it is not needed for 7u backport. + if ( operands() != NULL) { // the safety check + MetadataFactory::free_array (loader_data, operands()); + } + set_operands(new_ops); +} // end resize_operands() + + +// Extend the operands array with the length and size of the ext_cp operands. +// Used in RedefineClasses for CP merge. 
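
The resize_operands() hunk above (and extend_operands() that follows) treats the operands array as an offset table, one entry per bootstrap specifier, followed by the specifier data, so growing the table shifts every stored offset by the size of the new table entries; in the real array each offset occupies two u2 slots, which is where the 2*delta_len adjustment comes from. A toy sketch of the grow case only, using one slot per offset and std::vector<uint16_t> instead of Array<u2>:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy operands layout: [offset table: len slots][specifier data: rest].
    typedef std::vector<uint16_t> Operands;

    // Grow the offset table by delta_len entries and append room for delta_size
    // more data slots, fixing up existing offsets as the data moves right.
    // (The patch's resize_operands() also handles shrinking; omitted here.)
    Operands resize_operands(const Operands& old_ops, int old_len,
                             int delta_len, int delta_size) {
      Operands new_ops(old_ops.size() + delta_len + delta_size, 0);
      // Re-point existing table entries: data now starts delta_len slots further right.
      for (int i = 0; i < old_len; i++) {
        new_ops[i] = static_cast<uint16_t>(old_ops[i] + delta_len);
      }
      // Copy the specifier data (everything after the old offset table).
      for (size_t j = old_len; j < old_ops.size(); j++) {
        new_ops[j + delta_len] = old_ops[j];
      }
      return new_ops;
    }
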
+// Extend the operands array with the length and size of the ext_cp operands.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::extend_operands(constantPoolHandle ext_cp, TRAPS) {
+  int delta_len = operand_array_length(ext_cp->operands());
+  if (delta_len == 0) {
+    return; // nothing to do
+  }
+  int delta_size = ext_cp->operands()->length();
+
+  assert(delta_len > 0 && delta_size > 0, "extended operands array must be bigger");
+
+  if (operand_array_length(operands()) == 0) {
+    ClassLoaderData* loader_data = pool_holder()->class_loader_data();
+    Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, delta_size, CHECK);
+    // The first element index defines the offset of second part
+    operand_offset_at_put(new_ops, 0, 2*delta_len); // offset in new array
+    set_operands(new_ops);
+  } else {
+    resize_operands(delta_len, delta_size, CHECK);
+  }
+
+} // end extend_operands()
+
+
+// Shrink the operands array to a smaller array with new_len length.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::shrink_operands(int new_len, TRAPS) {
+  int old_len = operand_array_length(operands());
+  if (new_len == old_len) {
+    return; // nothing to do
+  }
+  assert(new_len < old_len, "shrunken operands array must be smaller");
+
+  int free_base = operand_next_offset_at(new_len - 1);
+  int delta_len = new_len - old_len;
+  int delta_size = 2*delta_len + free_base - operands()->length();
+
+  resize_operands(delta_len, delta_size, CHECK);
+
+} // end shrink_operands()
+
+
 void ConstantPool::copy_operands(constantPoolHandle from_cp, constantPoolHandle to_cp, TRAPS) {
@@ -1357,6 +1436,46 @@ int ConstantPool::find_matching_entry(int pattern_i,
 } // end find_matching_entry()

+// Compare this constant pool's bootstrap specifier at idx1 to the constant pool
+// cp2's bootstrap specifier at idx2.
+bool ConstantPool::compare_operand_to(int idx1, constantPoolHandle cp2, int idx2, TRAPS) {
+  int k1 = operand_bootstrap_method_ref_index_at(idx1);
+  int k2 = cp2->operand_bootstrap_method_ref_index_at(idx2);
+  bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
+
+  if (!match) {
+    return false;
+  }
+  int argc = operand_argument_count_at(idx1);
+  if (argc == cp2->operand_argument_count_at(idx2)) {
+    for (int j = 0; j < argc; j++) {
+      k1 = operand_argument_index_at(idx1, j);
+      k2 = cp2->operand_argument_index_at(idx2, j);
+      match = compare_entry_to(k1, cp2, k2, CHECK_false);
+      if (!match) {
+        return false;
+      }
+    }
+    return true;           // got through loop; all elements equal
+  }
+  return false;
+} // end compare_operand_to()
+
+// Search constant pool search_cp for a bootstrap specifier that matches
+// this constant pool's bootstrap specifier at pattern_i index.
+// Return the index of a matching bootstrap specifier or (-1) if there is no match.
+int ConstantPool::find_matching_operand(int pattern_i,
+       constantPoolHandle search_cp, int search_len, TRAPS) {
+  for (int i = 0; i < search_len; i++) {
+    bool found = compare_operand_to(pattern_i, search_cp, i, CHECK_(-1));
+    if (found) {
+      return i;
+    }
+  }
+  return -1;  // bootstrap specifier not found; return unused index (-1)
+} // end find_matching_operand()
+
+
 #ifndef PRODUCT

 const char* ConstantPool::printable_name_at(int which) {
diff --git a/hotspot/src/share/vm/oops/constantPool.hpp b/hotspot/src/share/vm/oops/constantPool.hpp
index ad4e2cfce6d..58cad7a6771 100644
--- a/hotspot/src/share/vm/oops/constantPool.hpp
+++ b/hotspot/src/share/vm/oops/constantPool.hpp
@@ -111,7 +111,6 @@ class ConstantPool : public Metadata {
     int _version;
   } _saved;

-  Monitor* _lock;

   void set_tags(Array<u1>* tags) { _tags = tags; }
   void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); }
@@ -567,6 +566,47 @@ class ConstantPool : public Metadata {
     _indy_argc_offset = 1,  // u2 argc
     _indy_argv_offset = 2   // u2 argv[argc]
   };
+
+  // These functions are used in RedefineClasses for CP merge
+
+  int operand_offset_at(int bootstrap_specifier_index) {
+    assert(0 <= bootstrap_specifier_index &&
+           bootstrap_specifier_index < operand_array_length(operands()),
+           "Corrupted CP operands");
+    return operand_offset_at(operands(), bootstrap_specifier_index);
+  }
+  int operand_bootstrap_method_ref_index_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_bsm_offset);
+  }
+  int operand_argument_count_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    int argc = operands()->at(offset + _indy_argc_offset);
+    return argc;
+  }
+  int operand_argument_index_at(int bootstrap_specifier_index, int j) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_argv_offset + j);
+  }
+  int operand_next_offset_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index) + _indy_argv_offset
+                   + operand_argument_count_at(bootstrap_specifier_index);
+    return offset;
+  }
+  // Compare a bootstrap specifier in the operands arrays
+  bool compare_operand_to(int bootstrap_specifier_index1, constantPoolHandle cp2,
+                          int bootstrap_specifier_index2, TRAPS);
+  // Find a bootstrap specifier in the operands array
+  int find_matching_operand(int bootstrap_specifier_index, constantPoolHandle search_cp,
+                            int operands_cur_len, TRAPS);
+  // Resize the operands array with delta_len and delta_size
+  void resize_operands(int delta_len, int delta_size, TRAPS);
+  // Extend the operands array with the length and size of the ext_cp operands
+  void extend_operands(constantPoolHandle ext_cp, TRAPS);
+  // Shrink the operands array to a smaller array with new_len length
+  void shrink_operands(int new_len, TRAPS);
+
+
   int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
     assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
     int op_base = invoke_dynamic_operand_base(which);
@@ -782,8 +822,17 @@ class ConstantPool : public Metadata {
   void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
   int resolved_reference_length() const { return _saved._resolved_reference_length; }
-  void set_lock(Monitor* lock) { _lock = lock; }
-  Monitor* lock() { return _lock; }
+
+  // lock() may return null -- constant pool updates may happen before this lock is
+  // initialized, because the _pool_holder has not been
fully initialized and + // has not been registered into the system dictionary. In this case, no other + // thread can be modifying this constantpool, so no synchronization is + // necessary. + // + // Use cplock() like this: + // oop cplock = cp->lock(); + // ObjectLocker ol(cplock , THREAD, cplock != NULL); + oop lock(); // Decrease ref counts of symbols that are in the constant pool // when the holder class is unloaded diff --git a/hotspot/src/share/vm/oops/cpCache.cpp b/hotspot/src/share/vm/oops/cpCache.cpp index c2175ca81ff..bc15e282b77 100644 --- a/hotspot/src/share/vm/oops/cpCache.cpp +++ b/hotspot/src/share/vm/oops/cpCache.cpp @@ -266,7 +266,8 @@ void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool, // the lock, so that when the losing writer returns, he can use the linked // cache entry. - MonitorLockerEx ml(cpool->lock()); + oop cplock = cpool->lock(); + ObjectLocker ol(cplock, Thread::current(), cplock != NULL); if (!is_f1_null()) { return; } diff --git a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp index e3539f3eb8e..ecb0dcbac93 100644 --- a/hotspot/src/share/vm/oops/instanceKlass.cpp +++ b/hotspot/src/share/vm/oops/instanceKlass.cpp @@ -54,6 +54,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/thread.inline.hpp" +#include "services/classLoadingService.hpp" #include "services/threadService.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" @@ -418,25 +419,6 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { set_annotations(NULL); } -volatile oop InstanceKlass::init_lock() const { - volatile oop lock = _init_lock; // read once - assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state - "only fully initialized state can have a null lock"); - return lock; -} - -// Set the initialization lock to null so the object can be GC'ed. Any racing -// threads to get this lock will see a null lock and will not lock. -// That's okay because they all check for initialized state after getting -// the lock and return. -void InstanceKlass::fence_and_clear_init_lock() { - // make sure previous stores are all done, notably the init_state. 
- OrderAccess::storestore(); - klass_oop_store(&_init_lock, NULL); - assert(!is_not_initialized(), "class must be initialized now"); -} - - bool InstanceKlass::should_be_initialized() const { return !is_initialized(); } @@ -473,7 +455,7 @@ void InstanceKlass::eager_initialize(Thread *thread) { void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) { EXCEPTION_MARK; volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); // abort if someone beat us to the initialization if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized() @@ -492,7 +474,6 @@ void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) { } else { // linking successfull, mark class as initialized this_oop->set_init_state (fully_initialized); - this_oop->fence_and_clear_init_lock(); // trace if (TraceClassInitialization) { ResourceMark rm(THREAD); @@ -619,7 +600,7 @@ bool InstanceKlass::link_class_impl( // verification & rewriting { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); // rewritten will have been set if loader constraint error found // on an earlier link attempt // don't verify or rewrite if already rewritten @@ -742,7 +723,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { // Step 1 { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); Thread *self = THREAD; // it's passed the current thread @@ -890,9 +871,8 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) { volatile oop init_lock = this_oop->init_lock(); - ObjectLocker ol(init_lock, THREAD, init_lock != NULL); + ObjectLocker ol(init_lock, THREAD); this_oop->set_init_state(state); - this_oop->fence_and_clear_init_lock(); ol.notify_all(CHECK); } @@ -2312,7 +2292,29 @@ static void clear_all_breakpoints(Method* m) { m->clear_all_breakpoints(); } + +void InstanceKlass::notify_unload_class(InstanceKlass* ik) { + // notify the debugger + if (JvmtiExport::should_post_class_unload()) { + JvmtiExport::post_class_unload(ik); + } + + // notify ClassLoadingService of class unload + ClassLoadingService::notify_class_unloaded(ik); +} + +void InstanceKlass::release_C_heap_structures(InstanceKlass* ik) { + // Clean up C heap + ik->release_C_heap_structures(); + ik->constants()->release_C_heap_structures(); +} + void InstanceKlass::release_C_heap_structures() { + + // Can't release the constant pool here because the constant pool can be + // deallocated separately from the InstanceKlass for default methods and + // redefine classes. 
+ // Deallocate oop map cache if (_oop_map_cache != NULL) { delete _oop_map_cache; @@ -2329,6 +2331,12 @@ void InstanceKlass::release_C_heap_structures() { FreeHeap(jmeths); } + MemberNameTable* mnt = member_names(); + if (mnt != NULL) { + delete mnt; + set_member_names(NULL); + } + int* indices = methods_cached_itable_indices_acquire(); if (indices != (int*)NULL) { release_set_methods_cached_itable_indices(NULL); @@ -2757,6 +2765,17 @@ nmethod* InstanceKlass::lookup_osr_nmethod(Method* const m, int bci, int comp_le return NULL; } +void InstanceKlass::add_member_name(Handle mem_name) { + jweak mem_name_wref = JNIHandles::make_weak_global(mem_name); + MutexLocker ml(MemberNameTable_lock); + DEBUG_ONLY(No_Safepoint_Verifier nsv); + + if (_member_names == NULL) { + _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(); + } + _member_names->add_member_name(mem_name_wref); +} + // ----------------------------------------------------------------------------------------------------- // Printing @@ -2820,7 +2839,7 @@ void InstanceKlass::print_on(outputStream* st) const { st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr(); st->print(BULLET"host class: "); host_klass()->print_value_on_maybe_null(st); st->cr(); st->print(BULLET"signers: "); signers()->print_value_on(st); st->cr(); - st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr(); + st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr(); if (source_file_name() != NULL) { st->print(BULLET"source file: "); source_file_name()->print_value_on(st); diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp index 9d94b2436d9..55b8ff48c4c 100644 --- a/hotspot/src/share/vm/oops/instanceKlass.hpp +++ b/hotspot/src/share/vm/oops/instanceKlass.hpp @@ -90,6 +90,7 @@ class DepChange; class nmethodBucket; class PreviousVersionNode; class JvmtiCachedClassFieldMap; +class MemberNameTable; // This is used in iterators below. class FieldClosure: public StackObj { @@ -183,8 +184,9 @@ class InstanceKlass: public Klass { oop _protection_domain; // Class signers. objArrayOop _signers; - // Initialization lock. Must be one per class and it has to be a VM internal - // object so java code cannot lock it (like the mirror) + // Lock for (1) initialization; (2) access to the ConstantPool of this class. + // Must be one per class and it has to be a VM internal object so java code + // cannot lock it (like the mirror). // It has to be an object not a Mutex because it's held through java calls. volatile oop _init_lock; @@ -235,7 +237,7 @@ class InstanceKlass: public Klass { _misc_rewritten = 1 << 0, // methods rewritten. 
_misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops _misc_should_verify_class = 1 << 2, // allow caching of preverification - _misc_is_anonymous = 1 << 3, // has embedded _inner_classes field + _misc_is_anonymous = 1 << 3, // has embedded _host_klass field _misc_is_contended = 1 << 4, // marked with contended annotation _misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods }; @@ -246,6 +248,7 @@ class InstanceKlass: public Klass { int _vtable_len; // length of Java vtable (in words) int _itable_len; // length of Java itable (in words) OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily) + MemberNameTable* _member_names; // Member names JNIid* _jni_ids; // First JNI identifier for static fields in this class jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none int* _methods_cached_itable_indices; // itable_index cache for JNI invoke corresponding to methods idnum, or NULL @@ -932,7 +935,9 @@ class InstanceKlass: public Klass { // referenced by handles. bool on_stack() const { return _constants->on_stack(); } - void release_C_heap_structures(); + // callbacks for actions during class unloading + static void notify_unload_class(InstanceKlass* ik); + static void release_C_heap_structures(InstanceKlass* ik); // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS @@ -966,6 +971,7 @@ class InstanceKlass: public Klass { #endif // INCLUDE_ALL_GCS u2 idnum_allocated_count() const { return _idnum_allocated_count; } + private: // initialization state #ifdef ASSERT @@ -992,9 +998,10 @@ private: { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); } // Lock during initialization - volatile oop init_lock() const; - void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); } - void fence_and_clear_init_lock(); // after fully_initialized +public: + volatile oop init_lock() const {return _init_lock; } +private: + void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); } // Offsets for memory management oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;} @@ -1020,6 +1027,8 @@ private: // Returns the array class with this class as element type Klass* array_klass_impl(bool or_null, TRAPS); + // Free CHeap allocated fields. + void release_C_heap_structures(); public: // CDS support - remove and restore oops from metadata. Oops are not shared. 
virtual void remove_unshareable_info(); @@ -1028,6 +1037,11 @@ public: // jvm support jint compute_modifier_flags(TRAPS) const; + // JSR-292 support + MemberNameTable* member_names() { return _member_names; } + void set_member_names(MemberNameTable* member_names) { _member_names = member_names; } + void add_member_name(Handle member_name); + public: // JVMTI support jint jvmti_class_status() const; diff --git a/hotspot/src/share/vm/oops/klassVtable.cpp b/hotspot/src/share/vm/oops/klassVtable.cpp index 3f8609532fa..ebc6e0aea88 100644 --- a/hotspot/src/share/vm/oops/klassVtable.cpp +++ b/hotspot/src/share/vm/oops/klassVtable.cpp @@ -519,6 +519,9 @@ bool klassVtable::is_miranda_entry_at(int i) { // check if a method is a miranda method, given a class's methods table and it's super // the caller must make sure that the method belongs to an interface implemented by the class bool klassVtable::is_miranda(Method* m, Array * class_methods, Klass* super) { + if (m->is_static()) { + return false; + } Symbol* name = m->name(); Symbol* signature = m->signature(); if (InstanceKlass::find_method(class_methods, name, signature) == NULL) { diff --git a/hotspot/src/share/vm/oops/method.cpp b/hotspot/src/share/vm/oops/method.cpp index 1e74a214830..74364c3bd6a 100644 --- a/hotspot/src/share/vm/oops/method.cpp +++ b/hotspot/src/share/vm/oops/method.cpp @@ -91,7 +91,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { set_hidden(false); set_dont_inline(false); set_method_data(NULL); - set_interpreter_throwout_count(0); + set_method_counters(NULL); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* @@ -105,16 +105,6 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { } NOT_PRODUCT(set_compiled_invocation_count(0);) - set_interpreter_invocation_count(0); - invocation_counter()->init(); - backedge_counter()->init(); - clear_number_of_breakpoints(); - -#ifdef TIERED - set_rate(0); - set_prev_event_count(0); - set_prev_time(0); -#endif } // Release Method*. The nmethod will be gone when we get here because @@ -124,6 +114,8 @@ void Method::deallocate_contents(ClassLoaderData* loader_data) { set_constMethod(NULL); MetadataFactory::free_metadata(loader_data, method_data()); set_method_data(NULL); + MetadataFactory::free_metadata(loader_data, method_counters()); + set_method_counters(NULL); // The nmethod will be gone when we get here. if (code() != NULL) _code = NULL; } @@ -323,7 +315,10 @@ bool Method::was_executed_more_than(int n) { // compiler does not bump invocation counter of compiled methods return true; } - else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) { + else if ((method_counters() != NULL && + method_counters()->invocation_counter()->carry()) || + (method_data() != NULL && + method_data()->invocation_counter()->carry())) { // The carry bit is set when the counter overflows and causes // a compilation to occur. 
We don't know how many times // the counter has been reset, so we simply assume it has @@ -387,6 +382,18 @@ void Method::build_interpreter_method_data(methodHandle method, TRAPS) { } } +MethodCounters* Method::build_method_counters(Method* m, TRAPS) { + methodHandle mh(m); + ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); + MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL); + if (mh->method_counters() == NULL) { + mh->set_method_counters(counters); + } else { + MetadataFactory::free_metadata(loader_data, counters); + } + return mh->method_counters(); +} + void Method::cleanup_inline_caches() { // The current system doesn't use inline caches in the interpreter // => nothing to do (keep this method around for future use) @@ -794,8 +801,6 @@ void Method::unlink_method() { set_signature_handler(NULL); } NOT_PRODUCT(set_compiled_invocation_count(0);) - invocation_counter()->reset(); - backedge_counter()->reset(); _adapter = NULL; _from_compiled_entry = NULL; @@ -808,8 +813,7 @@ void Method::unlink_method() { assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?"); set_method_data(NULL); - set_interpreter_throwout_count(0); - set_interpreter_invocation_count(0); + set_method_counters(NULL); } // Called when the method_holder is getting linked. Setup entrypoints so the method @@ -873,7 +877,7 @@ address Method::verified_code_entry() { debug_only(No_Safepoint_Verifier nsv;) nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); if (code == NULL && UseCodeCacheFlushing) { - nmethod *saved_code = CodeCache::find_and_remove_saved_code(this); + nmethod *saved_code = CodeCache::reanimate_saved_code(this); if (saved_code != NULL) { methodHandle method(this); assert( ! saved_code->is_osr_method(), "should not get here for osr" ); @@ -1545,28 +1549,34 @@ void Method::clear_all_breakpoints() { int Method::invocation_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) || + ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); + return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) + + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); } } else { - return invocation_counter()->count(); + return (mcs == NULL) ? 0 : mcs->invocation_counter()->count(); } } int Method::backedge_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) || + ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); + return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) + + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); } } else { - return backedge_counter()->count(); + return (mcs == NULL) ? 
0 : mcs->backedge_counter()->count(); } } @@ -1621,12 +1631,12 @@ void BreakpointInfo::set(Method* method) { assert(orig_bytecode() == code, "original bytecode must be the same"); } #endif + Thread *thread = Thread::current(); *method->bcp_from(_bci) = Bytecodes::_breakpoint; - method->incr_number_of_breakpoints(); + method->incr_number_of_breakpoints(thread); SystemDictionary::notice_modification(); { // Deoptimize all dependents on this method - Thread *thread = Thread::current(); HandleMark hm(thread); methodHandle mh(thread, method); Universe::flush_dependents_on_method(mh); @@ -1636,7 +1646,7 @@ void BreakpointInfo::set(Method* method) { void BreakpointInfo::clear(Method* method) { *method->bcp_from(_bci) = orig_bytecode(); assert(method->number_of_breakpoints() > 0, "must not go negative"); - method->decr_number_of_breakpoints(); + method->decr_number_of_breakpoints(Thread::current()); } // jmethodID handling diff --git a/hotspot/src/share/vm/oops/method.hpp b/hotspot/src/share/vm/oops/method.hpp index b9e176f6ba8..b23a3955e72 100644 --- a/hotspot/src/share/vm/oops/method.hpp +++ b/hotspot/src/share/vm/oops/method.hpp @@ -31,6 +31,7 @@ #include "interpreter/invocationCounter.hpp" #include "oops/annotations.hpp" #include "oops/constantPool.hpp" +#include "oops/methodCounters.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.hpp" #include "oops/typeArrayOop.hpp" @@ -100,6 +101,7 @@ class CheckedExceptionElement; class LocalVariableTableElement; class AdapterHandlerEntry; class MethodData; +class MethodCounters; class ConstMethod; class InlineTableSizes; class KlassSizeStats; @@ -109,7 +111,7 @@ class Method : public Metadata { private: ConstMethod* _constMethod; // Method read-only data. MethodData* _method_data; - int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + MethodCounters* _method_counters; AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) // note: can have vtables with >2**16 elements (because of inheritance) @@ -124,15 +126,6 @@ class Method : public Metadata { _hidden : 1, _dont_inline : 1, : 3; - u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting - u2 _number_of_breakpoints; // fullspeed debugging support - InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations - InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations - -#ifdef TIERED - float _rate; // Events (invocation and backedge counter increments) per millisecond - jlong _prev_time; // Previous time the rate was acquired -#endif #ifndef PRODUCT int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) @@ -247,11 +240,31 @@ class Method : public Metadata { void clear_all_breakpoints(); // Tracking number of breakpoints, for fullspeed debugging. // Only mutated by VM thread. 
- u2 number_of_breakpoints() const { return _number_of_breakpoints; } - void incr_number_of_breakpoints() { ++_number_of_breakpoints; } - void decr_number_of_breakpoints() { --_number_of_breakpoints; } + u2 number_of_breakpoints() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->number_of_breakpoints(); + } + } + void incr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->incr_number_of_breakpoints(); + } + } + void decr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->decr_number_of_breakpoints(); + } + } // Initialization only - void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + void clear_number_of_breakpoints() { + if (method_counters() != NULL) { + method_counters()->clear_number_of_breakpoints(); + } + } // index into InstanceKlass methods() array // note: also used by jfr @@ -288,14 +301,20 @@ class Method : public Metadata { void set_highest_osr_comp_level(int level); // Count of times method was exited via exception while interpreting - void interpreter_throwout_increment() { - if (_interpreter_throwout_count < 65534) { - _interpreter_throwout_count++; + void interpreter_throwout_increment(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->interpreter_throwout_increment(); } } - int interpreter_throwout_count() const { return _interpreter_throwout_count; } - void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; } + int interpreter_throwout_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_throwout_count(); + } + } // size of parameters int size_of_parameters() const { return constMethod()->size_of_parameters(); } @@ -339,23 +358,54 @@ class Method : public Metadata { MethodData* method_data() const { return _method_data; } + void set_method_data(MethodData* data) { _method_data = data; } - // invocation counter - InvocationCounter* invocation_counter() { return &_invocation_counter; } - InvocationCounter* backedge_counter() { return &_backedge_counter; } + MethodCounters* method_counters() const { + return _method_counters; + } + + + void set_method_counters(MethodCounters* counters) { + _method_counters = counters; + } #ifdef TIERED // We are reusing interpreter_invocation_count as a holder for the previous event count! // We can do that since interpreter_invocation_count is not used in tiered. - int prev_event_count() const { return _interpreter_invocation_count; } - void set_prev_event_count(int count) { _interpreter_invocation_count = count; } - jlong prev_time() const { return _prev_time; } - void set_prev_time(jlong time) { _prev_time = time; } - float rate() const { return _rate; } - void set_rate(float rate) { _rate = rate; } + int prev_event_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_invocation_count(); + } + } + void set_prev_event_count(int count, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_interpreter_invocation_count(count); + } + } + jlong prev_time() const { + return method_counters() == NULL ? 
0 : method_counters()->prev_time(); + } + void set_prev_time(jlong time, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_prev_time(time); + } + } + float rate() const { + return method_counters() == NULL ? 0 : method_counters()->rate(); + } + void set_rate(float rate, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_rate(rate); + } + } #endif int invocation_count(); @@ -366,14 +416,17 @@ class Method : public Metadata { static void build_interpreter_method_data(methodHandle method, TRAPS); + static MethodCounters* build_method_counters(Method* m, TRAPS); + int interpreter_invocation_count() { if (TieredCompilation) return invocation_count(); - else return _interpreter_invocation_count; + else return (method_counters() == NULL) ? 0 : + method_counters()->interpreter_invocation_count(); } - void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; } - int increment_interpreter_invocation_count() { + int increment_interpreter_invocation_count(TRAPS) { if (TieredCompilation) ShouldNotReachHere(); - return ++_interpreter_invocation_count; + MethodCounters* mcs = get_method_counters(CHECK_0); + return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count(); } #ifndef PRODUCT @@ -582,12 +635,12 @@ class Method : public Metadata { #endif /* CC_INTERP */ static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); } static ByteSize code_offset() { return byte_offset_of(Method, _code); } - static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); } - static ByteSize backedge_counter_offset() { return byte_offset_of(Method, _backedge_counter); } static ByteSize method_data_offset() { return byte_offset_of(Method, _method_data); } - static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(Method, _interpreter_invocation_count); } + static ByteSize method_counters_offset() { + return byte_offset_of(Method, _method_counters); + } #ifndef PRODUCT static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); } #endif // not PRODUCT @@ -598,8 +651,6 @@ class Method : public Metadata { // for code generation static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); } - static int interpreter_invocation_counter_offset_in_bytes() - { return offset_of(Method, _interpreter_invocation_count); } static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); } static int intrinsic_id_size_in_bytes() { return sizeof(u1); } @@ -757,6 +808,13 @@ class Method : public Metadata { private: void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); + MethodCounters* get_method_counters(TRAPS) { + if (_method_counters == NULL) { + build_method_counters(this, CHECK_AND_CLEAR_NULL); + } + return _method_counters; + } + public: bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } diff --git a/hotspot/src/os/solaris/vm/chaitin_solaris.cpp b/hotspot/src/share/vm/oops/methodCounters.cpp similarity index 65% rename from hotspot/src/os/solaris/vm/chaitin_solaris.cpp rename to hotspot/src/share/vm/oops/methodCounters.cpp index 92a437f9683..53d3e682b77 100644 --- a/hotspot/src/os/solaris/vm/chaitin_solaris.cpp +++ b/hotspot/src/share/vm/oops/methodCounters.cpp @@ -1,5 +1,5 @@ 
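The method.hpp changes above all follow one pattern: the counters now live in a lazily allocated MethodCounters object, so every reader tolerates a NULL _method_counters and every mutator goes through get_method_counters(TRAPS), which allocates on first use. A minimal sketch of a caller, not taken from the changeset (example_invocation_count is a hypothetical helper; the accessors it uses appear in the hunks above):

    // Sketch only: read a counter defensively, as Method::invocation_count() now does.
    static int example_invocation_count(Method* m) {
      MethodCounters* mcs = m->method_counters();   // NULL until counters are first needed
      return (mcs == NULL) ? 0 : mcs->invocation_counter()->count();
    }
    // Code that must bump a counter first ensures the MethodCounters exist,
    // e.g. via the static Method::build_method_counters(m, THREAD), and still
    // checks for NULL in case the allocation failed.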
/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,26 +21,17 @@ * questions. * */ - #include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" +#include "oops/methodCounters.hpp" +#include "runtime/thread.inline.hpp" -void PhaseRegAlloc::pd_preallocate_hook() { - // no action +MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) { + return new(loader_data, size(), false, THREAD) MethodCounters(); } -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action +void MethodCounters::clear_counters() { + invocation_counter()->reset(); + backedge_counter()->reset(); + set_interpreter_throwout_count(0); + set_interpreter_invocation_count(0); } -#endif - - -//Reconciliation History -// 1.1 99/02/12 15:35:26 chaitin_win32.cpp -// 1.2 99/02/18 15:38:56 chaitin_win32.cpp -// 1.4 99/03/09 10:37:48 chaitin_win32.cpp -// 1.6 99/03/25 11:07:44 chaitin_win32.cpp -// 1.8 99/06/22 16:38:58 chaitin_win32.cpp -//End diff --git a/hotspot/src/share/vm/oops/methodCounters.hpp b/hotspot/src/share/vm/oops/methodCounters.hpp new file mode 100644 index 00000000000..0a6c895b328 --- /dev/null +++ b/hotspot/src/share/vm/oops/methodCounters.hpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_OOPS_METHODCOUNTERS_HPP +#define SHARE_VM_OOPS_METHODCOUNTERS_HPP + +#include "oops/metadata.hpp" +#include "interpreter/invocationCounter.hpp" + +class MethodCounters: public MetaspaceObj { + friend class VMStructs; + private: + int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting + u2 _number_of_breakpoints; // fullspeed debugging support + InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations + InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations + +#ifdef TIERED + float _rate; // Events (invocation and backedge counter increments) per millisecond + jlong _prev_time; // Previous time the rate was acquired +#endif + + MethodCounters() : _interpreter_invocation_count(0), + _interpreter_throwout_count(0), + _number_of_breakpoints(0) +#ifdef TIERED + , _rate(0), + _prev_time(0) +#endif + { + invocation_counter()->init(); + backedge_counter()->init(); + } + + public: + static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS); + + void deallocate_contents(ClassLoaderData* loader_data) {} + DEBUG_ONLY(bool on_stack() { return false; }) // for template + + static int size() { return sizeof(MethodCounters) / wordSize; } + + bool is_klass() const { return false; } + + void clear_counters(); + + int interpreter_invocation_count() { + return _interpreter_invocation_count; + } + void set_interpreter_invocation_count(int count) { + _interpreter_invocation_count = count; + } + int increment_interpreter_invocation_count() { + return ++_interpreter_invocation_count; + } + + void interpreter_throwout_increment() { + if (_interpreter_throwout_count < 65534) { + _interpreter_throwout_count++; + } + } + int interpreter_throwout_count() const { + return _interpreter_throwout_count; + } + void set_interpreter_throwout_count(int count) { + _interpreter_throwout_count = count; + } + + u2 number_of_breakpoints() const { return _number_of_breakpoints; } + void incr_number_of_breakpoints() { ++_number_of_breakpoints; } + void decr_number_of_breakpoints() { --_number_of_breakpoints; } + void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + +#ifdef TIERED + jlong prev_time() const { return _prev_time; } + void set_prev_time(jlong time) { _prev_time = time; } + float rate() const { return _rate; } + void set_rate(float rate) { _rate = rate; } +#endif + + // invocation counter + InvocationCounter* invocation_counter() { return &_invocation_counter; } + InvocationCounter* backedge_counter() { return &_backedge_counter; } + + static ByteSize interpreter_invocation_counter_offset() { + return byte_offset_of(MethodCounters, _interpreter_invocation_count); + } + + static ByteSize invocation_counter_offset() { + return byte_offset_of(MethodCounters, _invocation_counter); + } + + static ByteSize backedge_counter_offset() { + return byte_offset_of(MethodCounters, _backedge_counter); + } + + static int interpreter_invocation_counter_offset_in_bytes() { + return offset_of(MethodCounters, _interpreter_invocation_count); + } + +}; +#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP diff --git a/hotspot/src/share/vm/oops/methodData.cpp b/hotspot/src/share/vm/oops/methodData.cpp index d01775cc8f8..73f70bac9d3 100644 --- a/hotspot/src/share/vm/oops/methodData.cpp +++ 
b/hotspot/src/share/vm/oops/methodData.cpp @@ -732,14 +732,17 @@ int MethodData::mileage_of(Method* method) { } else { int iic = method->interpreter_invocation_count(); if (mileage < iic) mileage = iic; - InvocationCounter* ic = method->invocation_counter(); - InvocationCounter* bc = method->backedge_counter(); - int icval = ic->count(); - if (ic->carry()) icval += CompileThreshold; - if (mileage < icval) mileage = icval; - int bcval = bc->count(); - if (bc->carry()) bcval += CompileThreshold; - if (mileage < bcval) mileage = bcval; + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); + int icval = ic->count(); + if (ic->carry()) icval += CompileThreshold; + if (mileage < icval) mileage = icval; + int bcval = bc->count(); + if (bc->carry()) bcval += CompileThreshold; + if (mileage < bcval) mileage = bcval; + } } return mileage; } diff --git a/hotspot/src/share/vm/oops/oop.cpp b/hotspot/src/share/vm/oops/oop.cpp index 43f227e4dc8..aed29da6796 100644 --- a/hotspot/src/share/vm/oops/oop.cpp +++ b/hotspot/src/share/vm/oops/oop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,11 +103,17 @@ intptr_t oopDesc::slow_identity_hash() { // When String table needs to rehash unsigned int oopDesc::new_hash(jint seed) { + EXCEPTION_MARK; ResourceMark rm; int length; - jchar* chars = java_lang_String::as_unicode_string(this, length); - // Use alternate hashing algorithm on the string - return AltHashing::murmur3_32(seed, chars, length); + jchar* chars = java_lang_String::as_unicode_string(this, length, THREAD); + if (chars != NULL) { + // Use alternate hashing algorithm on the string + return AltHashing::murmur3_32(seed, chars, length); + } else { + vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode strings for String table rehash"); + return 0; + } } VerifyOopClosure VerifyOopClosure::verify_oop; diff --git a/hotspot/src/share/vm/opto/cfgnode.cpp b/hotspot/src/share/vm/opto/cfgnode.cpp index 12bccd9a538..68fcc9cbb8e 100644 --- a/hotspot/src/share/vm/opto/cfgnode.cpp +++ b/hotspot/src/share/vm/opto/cfgnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1306,10 +1306,11 @@ static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) { return NULL; Node *x = n2; - Node *y = n1->in(1); - if( n2 == n1->in(1) ) { + Node *y = NULL; + if( x == n1->in(1) ) { y = n1->in(2); - } else if( n2 == n1->in(1) ) { + } else if( x == n1->in(2) ) { + y = n1->in(1); } else return NULL; // Not so profitable if compare and add are constants diff --git a/hotspot/src/share/vm/opto/chaitin.cpp b/hotspot/src/share/vm/opto/chaitin.cpp index 0fbe723c44f..9d69b0f3b71 100644 --- a/hotspot/src/share/vm/opto/chaitin.cpp +++ b/hotspot/src/share/vm/opto/chaitin.cpp @@ -145,6 +145,72 @@ void LRG_List::extend( uint nidx, uint lidx ) { #define NUMBUCKS 3 +// Straight out of Tarjan's union-find algorithm +uint LiveRangeMap::find_compress(uint lrg) { + uint cur = lrg; + uint next = _uf_map[cur]; + while (next != cur) { // Scan chain of equivalences + assert( next < cur, "always union smaller"); + cur = next; // until find a fixed-point + next = _uf_map[cur]; + } + + // Core of union-find algorithm: update chain of + // equivalences to be equal to the root. + while (lrg != next) { + uint tmp = _uf_map[lrg]; + _uf_map.map(lrg, next); + lrg = tmp; + } + return lrg; +} + +// Reset the Union-Find map to identity +void LiveRangeMap::reset_uf_map(uint max_lrg_id) { + _max_lrg_id= max_lrg_id; + // Force the Union-Find mapping to be at least this large + _uf_map.extend(_max_lrg_id, 0); + // Initialize it to be the ID mapping. + for (uint i = 0; i < _max_lrg_id; ++i) { + _uf_map.map(i, i); + } +} + +// Make all Nodes map directly to their final live range; no need for +// the Union-Find mapping after this call. +void LiveRangeMap::compress_uf_map_for_nodes() { + // For all Nodes, compress mapping + uint unique = _names.Size(); + for (uint i = 0; i < unique; ++i) { + uint lrg = _names[i]; + uint compressed_lrg = find(lrg); + if (lrg != compressed_lrg) { + _names.map(i, compressed_lrg); + } + } +} + +// Like Find above, but no path compress, so bad asymptotic behavior +uint LiveRangeMap::find_const(uint lrg) const { + if (!lrg) { + return lrg; // Ignore the zero LRG + } + + // Off the end? This happens during debugging dumps when you got + // brand new live ranges but have not told the allocator yet. 
+ if (lrg >= _max_lrg_id) { + return lrg; + } + + uint next = _uf_map[lrg]; + while (next != lrg) { // Scan chain of equivalences + assert(next < lrg, "always union smaller"); + lrg = next; // until find a fixed-point + next = _uf_map[lrg]; + } + return next; +} + //------------------------------Chaitin---------------------------------------- PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) : PhaseRegAlloc(unique, cfg, matcher, @@ -153,13 +219,13 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) #else NULL #endif - ), - _names(unique), _uf_map(unique), - _maxlrg(0), _live(0), - _spilled_once(Thread::current()->resource_area()), - _spilled_twice(Thread::current()->resource_area()), - _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0), - _oldphi(unique) + ) + , _lrg_map(unique) + , _live(0) + , _spilled_once(Thread::current()->resource_area()) + , _spilled_twice(Thread::current()->resource_area()) + , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0) + , _oldphi(unique) #ifndef PRODUCT , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling")) #endif @@ -168,7 +234,6 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq); - uint i,j; // Build a list of basic blocks, sorted by frequency _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); // Experiment with sorting strategies to speed compilation @@ -176,30 +241,30 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) Block **buckets[NUMBUCKS]; // Array of buckets uint buckcnt[NUMBUCKS]; // Array of bucket counters double buckval[NUMBUCKS]; // Array of bucket value cutoffs - for( i = 0; i < NUMBUCKS; i++ ) { - buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); + for (uint i = 0; i < NUMBUCKS; i++) { + buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks); buckcnt[i] = 0; // Bump by three orders of magnitude each time cutoff *= 0.001; buckval[i] = cutoff; - for( j = 0; j < _cfg._num_blocks; j++ ) { + for (uint j = 0; j < _cfg._num_blocks; j++) { buckets[i][j] = NULL; } } // Sort blocks into buckets - for( i = 0; i < _cfg._num_blocks; i++ ) { - for( j = 0; j < NUMBUCKS; j++ ) { - if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) { + for (uint i = 0; i < _cfg._num_blocks; i++) { + for (uint j = 0; j < NUMBUCKS; j++) { + if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) { // Assign block to end of list for appropriate bucket buckets[j][buckcnt[j]++] = _cfg._blocks[i]; - break; // kick out of inner loop + break; // kick out of inner loop } } } // Dump buckets into final block array uint blkcnt = 0; - for( i = 0; i < NUMBUCKS; i++ ) { - for( j = 0; j < buckcnt[i]; j++ ) { + for (uint i = 0; i < NUMBUCKS; i++) { + for (uint j = 0; j < buckcnt[i]; j++) { _blks[blkcnt++] = buckets[i][j]; } } @@ -207,6 +272,77 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) assert(blkcnt == _cfg._num_blocks, "Block array not totally filled"); } +//------------------------------Union------------------------------------------ +// union 2 sets together. 
+void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { + uint src = _lrg_map.find(src_n); + uint dst = _lrg_map.find(dst_n); + assert(src, ""); + assert(dst, ""); + assert(src < _lrg_map.max_lrg_id(), "oob"); + assert(dst < _lrg_map.max_lrg_id(), "oob"); + assert(src < dst, "always union smaller"); + _lrg_map.uf_map(dst, src); +} + +//------------------------------new_lrg---------------------------------------- +void PhaseChaitin::new_lrg(const Node *x, uint lrg) { + // Make the Node->LRG mapping + _lrg_map.extend(x->_idx,lrg); + // Make the Union-Find mapping an identity function + _lrg_map.uf_extend(lrg, lrg); +} + + +bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) { + Block *bcon = _cfg._bbs[con->_idx]; + uint cindex = bcon->find_node(con); + Node *con_next = bcon->_nodes[cindex+1]; + if (con_next->in(0) != con || !con_next->is_MachProj()) { + return false; // No MachProj's follow + } + + // Copy kills after the cloned constant + Node *kills = con_next->clone(); + kills->set_req(0, copy); + b->_nodes.insert(idx, kills); + _cfg._bbs.map(kills->_idx, b); + new_lrg(kills, max_lrg_id); + return true; +} + +//------------------------------compact---------------------------------------- +// Renumber the live ranges to compact them. Makes the IFG smaller. +void PhaseChaitin::compact() { + // Current the _uf_map contains a series of short chains which are headed + // by a self-cycle. All the chains run from big numbers to little numbers. + // The Find() call chases the chains & shortens them for the next Find call. + // We are going to change this structure slightly. Numbers above a moving + // wave 'i' are unchanged. Numbers below 'j' point directly to their + // compacted live range with no further chaining. There are no chains or + // cycles below 'i', so the Find call no longer works. + uint j=1; + uint i; + for (i = 1; i < _lrg_map.max_lrg_id(); i++) { + uint lr = _lrg_map.uf_live_range_id(i); + // Ignore unallocated live ranges + if (!lr) { + continue; + } + assert(lr <= i, ""); + _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr)); + } + // Now change the Node->LR mapping to reflect the compacted names + uint unique = _lrg_map.size(); + for (i = 0; i < unique; i++) { + uint lrg_id = _lrg_map.live_range_id(i); + _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id)); + } + + // Reset the Union-Find mapping + _lrg_map.reset_uf_map(j); +} + void PhaseChaitin::Register_Allocate() { // Above the OLD FP (and in registers) are the incoming arguments. Stack @@ -231,14 +367,12 @@ void PhaseChaitin::Register_Allocate() { // all copy-related live ranges low and then using the max copy-related // live range as a cut-off for LIVE and the IFG. In other words, I can // build a subset of LIVE and IFG just for copies. - PhaseLive live(_cfg,_names,&live_arena); + PhaseLive live(_cfg, _lrg_map.names(), &live_arena); // Need IFG for coalescing and coloring - PhaseIFG ifg( &live_arena ); + PhaseIFG ifg(&live_arena); _ifg = &ifg; - if (C->unique() > _names.Size()) _names.extend(C->unique()-1, 0); - // Come out of SSA world to the Named world. Assign (virtual) registers to // Nodes. Use the same register for all inputs and the output of PhiNodes // - effectively ending SSA form. 
This requires either coalescing live @@ -258,9 +392,9 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; // Mark live as being not available rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Empty IFG + ifg.init(_lrg_map.max_lrg_id()); // Empty IFG gather_lrg_masks( false ); // Collect LRG masks - live.compute( _maxlrg ); // Compute liveness + live.compute(_lrg_map.max_lrg_id()); // Compute liveness _live = &live; // Mark LIVE as being available } @@ -270,19 +404,19 @@ void PhaseChaitin::Register_Allocate() { // across any GC point where the derived value is live. So this code looks // at all the GC points, and "stretches" the live range of any base pointer // to the GC point. - if( stretch_base_pointer_live_ranges(&live_arena) ) { - NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); ) + if (stretch_base_pointer_live_ranges(&live_arena)) { + NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);) // Since some live range stretched, I need to recompute live _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); - gather_lrg_masks( false ); - live.compute( _maxlrg ); + ifg.init(_lrg_map.max_lrg_id()); + gather_lrg_masks(false); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Create the interference graph using virtual copies - build_ifg_virtual( ); // Include stack slots this time + build_ifg_virtual(); // Include stack slots this time // Aggressive (but pessimistic) copy coalescing. // This pass works on virtual copies. Any virtual copies which are not @@ -296,8 +430,8 @@ void PhaseChaitin::Register_Allocate() { // given Node and search them for an instance, i.e., time O(#MaxLRG)). _ifg->SquareUp(); - PhaseAggressiveCoalesce coalesce( *this ); - coalesce.coalesce_driver( ); + PhaseAggressiveCoalesce coalesce(*this); + coalesce.coalesce_driver(); // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do // not match the Phi itself, insert a copy. 
coalesce.insert_copies(_matcher); @@ -310,28 +444,36 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); gather_lrg_masks( true ); - live.compute( _maxlrg ); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Build physical interference graph uint must_spill = 0; - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); // If we have a guaranteed spill, might as well spill now - if( must_spill ) { - if( !_maxlrg ) return; + if (must_spill) { + if(!_lrg_map.max_lrg_id()) { + return; + } // Bail out if unique gets too large (ie - unique > MaxNodeLimit) C->check_node_count(10*must_spill, "out of nodes before split"); - if (C->failing()) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (C->failing()) { + return; + } + + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) // or we failed to split C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split"); - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) compact(); // Compact LRGs; return new lower max lrg @@ -340,23 +482,23 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Build a new interference graph + ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph gather_lrg_masks( true ); // Collect intersect mask - live.compute( _maxlrg ); // Compute LIVE + live.compute(_lrg_map.max_lrg_id()); // Compute LIVE _live = &live; } - build_ifg_physical( &live_arena ); + build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing of those spills - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // If max live ranges greater than cutoff, don't color the stack. // This cutoff can be larger than below since it is only done once. 
- coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); @@ -390,13 +532,18 @@ void PhaseChaitin::Register_Allocate() { } } - if( !_maxlrg ) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (!_lrg_map.max_lrg_id()) { + return; + } + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) - C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split"); - if (C->failing()) return; + C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split"); + if (C->failing()) { + return; + } - compact(); // Compact LRGs; return new lower max lrg + compact(); // Compact LRGs; return new lower max lrg // Nuke the live-ness and interference graph and LiveRanGe info { @@ -404,26 +551,26 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); // Create LiveRanGe array. // Intersect register masks for all USEs and DEFs - gather_lrg_masks( true ); - live.compute( _maxlrg ); + gather_lrg_masks(true); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // Check for few live ranges determines how aggressive coalesce is. - coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); #endif @@ -435,7 +582,7 @@ void PhaseChaitin::Register_Allocate() { // Select colors by re-inserting LRGs back into the IFG in reverse order. // Return whether or not something spills. - spills = Select( ); + spills = Select(); } // Count number of Simplify-Select trips per coloring success. @@ -452,9 +599,12 @@ void PhaseChaitin::Register_Allocate() { // max_reg is past the largest *register* used. // Convert that to a frame_slot number. 
- if( _max_reg <= _matcher._new_SP ) + if (_max_reg <= _matcher._new_SP) { _framesize = C->out_preserve_stack_slots(); - else _framesize = _max_reg -_matcher._new_SP; + } + else { + _framesize = _max_reg -_matcher._new_SP; + } assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough"); // This frame must preserve the required fp alignment @@ -462,8 +612,9 @@ void PhaseChaitin::Register_Allocate() { assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" ); #ifndef PRODUCT _total_framesize += _framesize; - if( (int)_framesize > _max_framesize ) + if ((int)_framesize > _max_framesize) { _max_framesize = _framesize; + } #endif // Convert CISC spills @@ -475,15 +626,17 @@ void PhaseChaitin::Register_Allocate() { log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing()); } - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) // Move important info out of the live_arena to longer lasting storage. - alloc_node_regs(_names.Size()); - for (uint i=0; i < _names.Size(); i++) { - if (_names[i]) { // Live range associated with Node? - LRG &lrg = lrgs(_names[i]); + alloc_node_regs(_lrg_map.size()); + for (uint i=0; i < _lrg_map.size(); i++) { + if (_lrg_map.live_range_id(i)) { // Live range associated with Node? + LRG &lrg = lrgs(_lrg_map.live_range_id(i)); if (!lrg.alive()) { set_bad(i); } else if (lrg.num_regs() == 1) { @@ -537,11 +690,11 @@ void PhaseChaitin::de_ssa() { Node *n = b->_nodes[j]; // Pre-color to the zero live range, or pick virtual register const RegMask &rm = n->out_RegMask(); - _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 ); + _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0); } } // Reset the Union-Find mapping to be identity - reset_uf_map(lr_counter); + _lrg_map.reset_uf_map(lr_counter); } @@ -551,7 +704,7 @@ void PhaseChaitin::de_ssa() { void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // Nail down the frame pointer live range - uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr)); + uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr)); lrgs(fp_lrg)._cost += 1e12; // Cost is infinite // For all blocks @@ -566,14 +719,14 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { uint idx = n->is_Copy(); // Get virtual register number, same as LiveRanGe index - uint vreg = n2lidx(n); + uint vreg = _lrg_map.live_range_id(n); LRG &lrg = lrgs(vreg); if( vreg ) { // No vreg means un-allocable (e.g. 
memory) // Collect has-copy bit if( idx ) { lrg._has_copy = 1; - uint clidx = n2lidx(n->in(idx)); + uint clidx = _lrg_map.live_range_id(n->in(idx)); LRG &copy_src = lrgs(clidx); copy_src._has_copy = 1; } @@ -773,8 +926,10 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { } // Prepare register mask for each input for( uint k = input_edge_start; k < cnt; k++ ) { - uint vreg = n2lidx(n->in(k)); - if( !vreg ) continue; + uint vreg = _lrg_map.live_range_id(n->in(k)); + if (!vreg) { + continue; + } // If this instruction is CISC Spillable, add the flags // bit to its appropriate input @@ -857,7 +1012,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { } // end for all blocks // Final per-liverange setup - for (uint i2=0; i2<_maxlrg; i2++) { + for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) { LRG &lrg = lrgs(i2); assert(!lrg._is_vector || !lrg._fat_proj, "sanity"); if (lrg.num_regs() > 1 && !lrg._fat_proj) { @@ -879,7 +1034,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // The bit is checked in Simplify. void PhaseChaitin::set_was_low() { #ifdef ASSERT - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { int size = lrgs(i).num_regs(); uint old_was_lo = lrgs(i)._was_lo; lrgs(i)._was_lo = 0; @@ -913,7 +1068,7 @@ void PhaseChaitin::set_was_low() { // Compute cost/area ratio, in case we spill. Build the lo-degree list. void PhaseChaitin::cache_lrg_info( ) { - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { LRG &lrg = lrgs(i); // Check for being of low degree: means we can be trivially colored. @@ -949,10 +1104,10 @@ void PhaseChaitin::Pre_Simplify( ) { // Warm up the lo-degree no-copy list int lo_no_copy = 0; - for( uint i = 1; i < _maxlrg; i++ ) { - if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) || + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { + if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) || !lrgs(i).alive() || - lrgs(i)._must_spill ) { + lrgs(i)._must_spill) { lrgs(i)._next = lo_no_copy; lo_no_copy = i; } @@ -1163,7 +1318,7 @@ static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) { OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { // Check for "at_risk" LRG's - uint risk_lrg = Find(lrg._risk_bias); + uint risk_lrg = _lrg_map.find(lrg._risk_bias); if( risk_lrg != 0 ) { // Walk the colored neighbors of the "at_risk" candidate // Choose a color which is both legal and already taken by a neighbor @@ -1179,7 +1334,7 @@ OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { } } - uint copy_lrg = Find(lrg._copy_bias); + uint copy_lrg = _lrg_map.find(lrg._copy_bias); if( copy_lrg != 0 ) { // If he has a color, if( !(*(_ifg->_yanked))[copy_lrg] ) { @@ -1423,10 +1578,10 @@ uint PhaseChaitin::Select( ) { void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) { if( _spilled_once.test(src->_idx) ) { _spilled_once.set(dst->_idx); - lrgs(Find(dst))._was_spilled1 = 1; + lrgs(_lrg_map.find(dst))._was_spilled1 = 1; if( _spilled_twice.test(src->_idx) ) { _spilled_twice.set(dst->_idx); - lrgs(Find(dst))._was_spilled2 = 1; + lrgs(_lrg_map.find(dst))._was_spilled2 = 1; } } } @@ -1471,7 +1626,7 @@ void PhaseChaitin::fixup_spills() { MachNode *mach = n->as_Mach(); inp = mach->operand_index(inp); Node *src = n->in(inp); // Value to load or store - LRG &lrg_cisc = lrgs( Find_const(src) ); + LRG &lrg_cisc = lrgs(_lrg_map.find_const(src)); OptoReg::Name src_reg = lrg_cisc.reg(); // Doubles record the HIGH register of an adjacent pair.
src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs()); @@ -1554,9 +1709,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive Block *startb = _cfg._bbs[C->top()->_idx]; startb->_nodes.insert(startb->find_node(C->top()), base ); _cfg._bbs.map( base->_idx, startb ); - assert (n2lidx(base) == 0, "should not have LRG yet"); + assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet"); } - if (n2lidx(base) == 0) { + if (_lrg_map.live_range_id(base) == 0) { new_lrg(base, maxlrg++); } assert(base->in(0) == _cfg._root && @@ -1566,7 +1721,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive } // Check for AddP-related opcodes - if( !derived->is_Phi() ) { + if (!derived->is_Phi()) { assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name())); Node *base = derived->in(AddPNode::Base); derived_base_map[derived->_idx] = base; @@ -1629,9 +1784,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive // base pointer that is live across the Safepoint for oopmap building. The // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the // required edge set. -bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { +bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) { int must_recompute_live = false; - uint maxlrg = _maxlrg; + uint maxlrg = _lrg_map.max_lrg_id(); Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique()); memset( derived_base_map, 0, sizeof(Node*)*C->unique() ); @@ -1669,15 +1824,18 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { } // Get value being defined - uint lidx = n2lidx(n); - if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) { + uint lidx = _lrg_map.live_range_id(n); + // Ignore the occasional brand-new live range + if (lidx && lidx < _lrg_map.max_lrg_id()) { // Remove from live-out set liveout.remove(lidx); // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) liveout.remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout.remove(_lrg_map.live_range_id(n->in(idx))); + } } // Found a safepoint? @@ -1695,21 +1853,21 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); // If its an OOP with a non-zero offset, then it is derived. if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) { - Node *base = find_base_for_derived( derived_base_map, derived, maxlrg ); - assert( base->_idx < _names.Size(), "" ); + Node *base = find_base_for_derived(derived_base_map, derived, maxlrg); + assert(base->_idx < _lrg_map.size(), ""); // Add reaching DEFs of derived pointer and base pointer as a // pair of inputs - n->add_req( derived ); - n->add_req( base ); + n->add_req(derived); + n->add_req(base); // See if the base pointer is already live to this point. // Since I'm working on the SSA form, live-ness amounts to // reaching def's. So if I find the base's live range then // I know the base's def reaches here. 
- if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or - !liveout.member( n2lidx(base) ) ) && // not live) AND - (n2lidx(base) > 0) && // not a constant - _cfg._bbs[base->_idx] != b ) { // base not def'd in blk) + if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or + !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND + (_lrg_map.live_range_id(base) > 0) && // not a constant + _cfg._bbs[base->_idx] != b) { // base not def'd in blk) // Base pointer is not currently live. Since I stretched // the base pointer to here and it crosses basic-block // boundaries, the global live info is now incorrect. @@ -1721,11 +1879,12 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { } // End of if found a GC point // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) { - uint lidx = n2lidx(n->in(k)); - if( lidx < _maxlrg ) - liveout.insert( lidx ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for (uint k = 1; k < n->req(); k++) { + uint lidx = _lrg_map.live_range_id(n->in(k)); + if (lidx < _lrg_map.max_lrg_id()) { + liveout.insert(lidx); + } } } @@ -1733,11 +1892,12 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { liveout.clear(); // Free the memory used by liveout. } // End of forall blocks - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); // If I created a new live range I need to recompute live - if( maxlrg != _ifg->_maxlrg ) + if (maxlrg != _ifg->_maxlrg) { must_recompute_live = true; + } return must_recompute_live != 0; } @@ -1745,16 +1905,17 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { //------------------------------add_reference---------------------------------- // Extend the node to LRG mapping -void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) { - _names.extend( node->_idx, n2lidx(old_node) ); + +void PhaseChaitin::add_reference(const Node *node, const Node *old_node) { + _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node)); } //------------------------------dump------------------------------------------- #ifndef PRODUCT -void PhaseChaitin::dump( const Node *n ) const { - uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0; +void PhaseChaitin::dump(const Node *n) const { + uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0; tty->print("L%d",r); - if( r && n->Opcode() != Op_Phi ) { + if (r && n->Opcode() != Op_Phi) { if( _node_regs ) { // Got a post-allocation copy of allocation? tty->print("["); OptoReg::Name second = get_reg_second(n); @@ -1775,11 +1936,13 @@ void PhaseChaitin::dump( const Node *n ) const { tty->print("/N%d\t",n->_idx); tty->print("%s === ", n->Name()); uint k; - for( k = 0; k < n->req(); k++) { + for (k = 0; k < n->req(); k++) { Node *m = n->in(k); - if( !m ) tty->print("_ "); + if (!m) { + tty->print("_ "); + } else { - uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0; + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); // Data MultiNode's can have projections with no real registers. // Don't die while dumping them. @@ -1810,8 +1973,10 @@ void PhaseChaitin::dump( const Node *n ) const { if( k < n->len() && n->in(k) ) tty->print("| "); for( ; k < n->len(); k++ ) { Node *m = n->in(k); - if( !m ) break; - uint r = (m->_idx < _names.Size() ) ? 
Find_const(m) : 0; + if(!m) { + break; + } + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); tty->print("/N%d ",m->_idx); } @@ -1839,7 +2004,7 @@ void PhaseChaitin::dump( const Block * b ) const { tty->print("{"); uint i; while ((i = elements.next()) != 0) { - tty->print("L%d ", Find_const(i)); + tty->print("L%d ", _lrg_map.find_const(i)); } tty->print_cr("}"); } @@ -1863,10 +2028,14 @@ void PhaseChaitin::dump() const { // Dump LRG array tty->print("--- Live RanGe Array ---\n"); - for(uint i2 = 1; i2 < _maxlrg; i2++ ) { + for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) { tty->print("L%d: ",i2); - if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( ); - else tty->print_cr("new LRG"); + if (i2 < _ifg->_maxlrg) { + lrgs(i2).dump(); + } + else { + tty->print_cr("new LRG"); + } } tty->print_cr(""); @@ -1939,7 +2108,7 @@ char *PhaseChaitin::dump_register( const Node *n, char *buf ) const { // Post allocation, use direct mappings, no LRG info available print_reg( get_reg_first(n), this, buf ); } else { - uint lidx = Find_const(n); // Grab LRG number + uint lidx = _lrg_map.find_const(n); // Grab LRG number if( !_ifg ) { sprintf(buf,"L%d",lidx); // No register binding yet } else if( !lidx ) { // Special, not allocated value @@ -1968,7 +2137,7 @@ void PhaseChaitin::dump_for_spill_split_recycle() const { if( WizardMode && (PrintCompilation || PrintOpto) ) { // Display which live ranges need to be split and the allocator's state tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt); - for( uint bidx = 1; bidx < _maxlrg; bidx++ ) { + for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) { if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { tty->print("L%d: ", bidx); lrgs(bidx).dump(); @@ -2099,14 +2268,17 @@ void PhaseChaitin::dump_bb( uint pre_order ) const { void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { tty->print_cr("---dump of L%d---",lidx); - if( _ifg ) { - if( lidx >= _maxlrg ) { + if (_ifg) { + if (lidx >= _lrg_map.max_lrg_id()) { tty->print("Attempt to print live range index beyond max live range.\n"); return; } tty->print("L%d: ",lidx); - if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( ); - else tty->print_cr("new LRG"); + if (lidx < _ifg->_maxlrg) { + lrgs(lidx).dump(); + } else { + tty->print_cr("new LRG"); + } } if( _ifg && lidx < _ifg->_maxlrg) { tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx)); @@ -2121,8 +2293,8 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { // For all instructions for( uint j = 0; j < b->_nodes.size(); j++ ) { Node *n = b->_nodes[j]; - if( Find_const(n) == lidx ) { - if( !dump_once++ ) { + if (_lrg_map.find_const(n) == lidx) { + if (!dump_once++) { tty->cr(); b->dump_head( &_cfg._bbs ); } @@ -2133,11 +2305,13 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { uint cnt = n->req(); for( uint k = 1; k < cnt; k++ ) { Node *m = n->in(k); - if (!m) continue; // be robust in the dumper - if( Find_const(m) == lidx ) { - if( !dump_once++ ) { + if (!m) { + continue; // be robust in the dumper + } + if (_lrg_map.find_const(m) == lidx) { + if (!dump_once++) { tty->cr(); - b->dump_head( &_cfg._bbs ); + b->dump_head(&_cfg._bbs); } dump(n); } diff --git a/hotspot/src/share/vm/opto/chaitin.hpp b/hotspot/src/share/vm/opto/chaitin.hpp index fc8010b851e..3455005f330 100644 --- a/hotspot/src/share/vm/opto/chaitin.hpp +++ b/hotspot/src/share/vm/opto/chaitin.hpp @@ -265,18 +265,118 @@ public: int effective_degree( uint lidx ) const; }; -// 
TEMPORARILY REPLACED WITH COMMAND LINE FLAG +// The LiveRangeMap class is responsible for storing node to live range id mapping. +// Each node is mapped to a live range id (a virtual register). Nodes that are +// not considered for register allocation are given live range id 0. +class LiveRangeMap VALUE_OBJ_CLASS_SPEC { -//// !!!!! Magic Constants need to move into ad file -#ifdef SPARC -//#define FLOAT_PRESSURE 30 /* SFLT_REG_mask.Size() - 1 */ -//#define INT_PRESSURE 23 /* NOTEMP_I_REG_mask.Size() - 1 */ -#define FLOAT_INCREMENT(regs) regs -#else -//#define FLOAT_PRESSURE 6 -//#define INT_PRESSURE 6 -#define FLOAT_INCREMENT(regs) 1 -#endif +private: + + uint _max_lrg_id; + + // Union-find map. Declared as a short for speed. + // Indexed by live-range number, it returns the compacted live-range number + LRG_List _uf_map; + + // Map from Nodes to live ranges + LRG_List _names; + + // Straight out of Tarjan's union-find algorithm + uint find_compress(const Node *node) { + uint lrg_id = find_compress(_names[node->_idx]); + _names.map(node->_idx, lrg_id); + return lrg_id; + } + + uint find_compress(uint lrg); + +public: + + const LRG_List& names() { + return _names; + } + + uint max_lrg_id() const { + return _max_lrg_id; + } + + void set_max_lrg_id(uint max_lrg_id) { + _max_lrg_id = max_lrg_id; + } + + uint size() const { + return _names.Size(); + } + + uint live_range_id(uint idx) const { + return _names[idx]; + } + + uint live_range_id(const Node *node) const { + return _names[node->_idx]; + } + + uint uf_live_range_id(uint lrg_id) const { + return _uf_map[lrg_id]; + } + + void map(uint idx, uint lrg_id) { + _names.map(idx, lrg_id); + } + + void uf_map(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.map(dst_lrg_id, src_lrg_id); + } + + void extend(uint idx, uint lrg_id) { + _names.extend(idx, lrg_id); + } + + void uf_extend(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.extend(dst_lrg_id, src_lrg_id); + } + + LiveRangeMap(uint unique) + : _names(unique) + , _uf_map(unique) + , _max_lrg_id(0) {} + + uint find_id( const Node *n ) { + uint retval = live_range_id(n); + assert(retval == find(n),"Invalid node to lidx mapping"); + return retval; + } + + // Reset the Union-Find map to identity + void reset_uf_map(uint max_lrg_id); + + // Make all Nodes map directly to their final live range; no need for + // the Union-Find mapping after this call. + void compress_uf_map_for_nodes(); + + uint find(uint lidx) { + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx); + } + + // Convert a Node into a Live Range Index - a lidx + uint find(const Node *node) { + uint lidx = live_range_id(node); + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? uf_lidx : find_compress(node); + } + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(uint lrg) const; + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(const Node *node) const { + if(node->_idx >= _names.Size()) { + return 0; // not mapped, usual for debug dump + } + return find_const(_names[node->_idx]); + } +}; //------------------------------Chaitin---------------------------------------- // Briggs-Chaitin style allocation, mostly. 
@@ -286,7 +386,6 @@ class PhaseChaitin : public PhaseRegAlloc { int _trip_cnt; int _alternate; - uint _maxlrg; // Max live range number LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); } PhaseLive *_live; // Liveness, used in the interference graph PhaseIFG *_ifg; // Interference graph (for original chunk) @@ -294,16 +393,6 @@ class PhaseChaitin : public PhaseRegAlloc { VectorSet _spilled_once; // Nodes that have been spilled VectorSet _spilled_twice; // Nodes that have been spilled twice - LRG_List _names; // Map from Nodes to Live RanGes - - // Union-find map. Declared as a short for speed. - // Indexed by live-range number, it returns the compacted live-range number - LRG_List _uf_map; - // Reset the Union-Find map to identity - void reset_uf_map( uint maxlrg ); - // Remove the need for the Union-Find mapping - void compress_uf_map_for_nodes( ); - // Combine the Live Range Indices for these 2 Nodes into a single live // range. Future requests for any Node in either live range will // return the live range index for the combined live range. @@ -322,7 +411,34 @@ class PhaseChaitin : public PhaseRegAlloc { // Helper functions for Split() uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ); uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ); - int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ); + + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) { + bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id()); + + if(found_projs) { + uint max_lrg_id = lrg_map.max_lrg_id(); + lrg_map.set_max_lrg_id(max_lrg_id + 1); + } + + return found_projs; + } + + //------------------------------clone_projs------------------------------------ + // After cloning some rematerialized instruction, clone any MachProj's that + // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants + // use G3 as an address temp. + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) { + bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id); + + if(found_projs) { + max_lrg_id++; + } + + return found_projs; + } + + bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id); + Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru); // True if lidx is used before any real register is def'd in the block @@ -349,20 +465,11 @@ public: PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher ); ~PhaseChaitin() {} - // Convert a Node into a Live Range Index - a lidx - uint Find( const Node *n ) { - uint lidx = n2lidx(n); - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ? uf_lidx : Find_compress(n); - } - uint Find_const( uint lrg ) const; - uint Find_const( const Node *n ) const; + LiveRangeMap _lrg_map; // Do all the real work of allocate void Register_Allocate(); - uint n2lidx( const Node *n ) const { return _names[n->_idx]; } - float high_frequency_lrg() const { return _high_frequency_lrg; } #ifndef PRODUCT @@ -374,18 +481,6 @@ private: // all inputs to a PhiNode, effectively coalescing live ranges. Insert // copies as needed. void de_ssa(); - uint Find_compress( const Node *n ); - uint Find( uint lidx ) { - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ?
uf_lidx : Find_compress(lidx); - } - uint Find_compress( uint lidx ); - - uint Find_id( const Node *n ) { - uint retval = n2lidx(n); - assert(retval == Find(n),"Invalid node to lidx mapping"); - return retval; - } // Add edge between reg and everything in the vector. // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask diff --git a/hotspot/src/share/vm/opto/coalesce.cpp b/hotspot/src/share/vm/opto/coalesce.cpp index 0811ea061fe..74618fb410c 100644 --- a/hotspot/src/share/vm/opto/coalesce.cpp +++ b/hotspot/src/share/vm/opto/coalesce.cpp @@ -34,160 +34,12 @@ #include "opto/matcher.hpp" #include "opto/regmask.hpp" -//============================================================================= -//------------------------------reset_uf_map----------------------------------- -void PhaseChaitin::reset_uf_map( uint maxlrg ) { - _maxlrg = maxlrg; - // Force the Union-Find mapping to be at least this large - _uf_map.extend(_maxlrg,0); - // Initialize it to be the ID mapping. - for( uint i=0; i<_maxlrg; i++ ) - _uf_map.map(i,i); -} - -//------------------------------compress_uf_map-------------------------------- -// Make all Nodes map directly to their final live range; no need for -// the Union-Find mapping after this call. -void PhaseChaitin::compress_uf_map_for_nodes( ) { - // For all Nodes, compress mapping - uint unique = _names.Size(); - for( uint i=0; i _idx]); - _names.map(n->_idx,lrg); - return lrg; -} - -//------------------------------Find_const------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( uint lrg ) const { - if( !lrg ) return lrg; // Ignore the zero LRG - // Off the end? This happens during debugging dumps when you got - // brand new live ranges but have not told the allocator yet. - if( lrg >= _maxlrg ) return lrg; - uint next = _uf_map[lrg]; - while( next != lrg ) { // Scan chain of equivalences - assert( next < lrg, "always union smaller" ); - lrg = next; // until find a fixed-point - next = _uf_map[lrg]; - } - return next; -} - -//------------------------------Find------------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( const Node *n ) const { - if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump - return Find_const( _names[n->_idx] ); -} - -//------------------------------Union------------------------------------------ -// union 2 sets together. -void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { - uint src = Find(src_n); - uint dst = Find(dst_n); - assert( src, "" ); - assert( dst, "" ); - assert( src < _maxlrg, "oob" ); - assert( dst < _maxlrg, "oob" ); - assert( src < dst, "always union smaller" ); - _uf_map.map(dst,src); -} - -//------------------------------new_lrg---------------------------------------- -void PhaseChaitin::new_lrg( const Node *x, uint lrg ) { - // Make the Node->LRG mapping - _names.extend(x->_idx,lrg); - // Make the Union-Find mapping an identity function - _uf_map.extend(lrg,lrg); -} - -//------------------------------clone_projs------------------------------------ -// After cloning some rematerialized instruction, clone any MachProj's that -// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants -// use G3 as an address temp. 
-int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) { - Block *bcon = _cfg._bbs[con->_idx]; - uint cindex = bcon->find_node(con); - Node *con_next = bcon->_nodes[cindex+1]; - if( con_next->in(0) != con || !con_next->is_MachProj() ) - return false; // No MachProj's follow - - // Copy kills after the cloned constant - Node *kills = con_next->clone(); - kills->set_req( 0, copy ); - b->_nodes.insert( idx, kills ); - _cfg._bbs.map( kills->_idx, b ); - new_lrg( kills, maxlrg++ ); - return true; -} - -//------------------------------compact---------------------------------------- -// Renumber the live ranges to compact them. Makes the IFG smaller. -void PhaseChaitin::compact() { - // Current the _uf_map contains a series of short chains which are headed - // by a self-cycle. All the chains run from big numbers to little numbers. - // The Find() call chases the chains & shortens them for the next Find call. - // We are going to change this structure slightly. Numbers above a moving - // wave 'i' are unchanged. Numbers below 'j' point directly to their - // compacted live range with no further chaining. There are no chains or - // cycles below 'i', so the Find call no longer works. - uint j=1; - uint i; - for( i=1; i < _maxlrg; i++ ) { - uint lr = _uf_map[i]; - // Ignore unallocated live ranges - if( !lr ) continue; - assert( lr <= i, "" ); - _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]); - } - if( false ) // PrintOptoCompactLiveRanges - printf("Compacted %d LRs from %d\n",i-j,i); - // Now change the Node->LR mapping to reflect the compacted names - uint unique = _names.Size(); - for( i=0; i print("L%d/N%d ",r,n->_idx); } @@ -235,9 +87,9 @@ void PhaseCoalesce::dump() const { //------------------------------combine_these_two------------------------------ // Combine the live ranges def'd by these 2 Nodes. N2 is an input to N1. -void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) { - uint lr1 = _phc.Find(n1); - uint lr2 = _phc.Find(n2); +void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) { + uint lr1 = _phc._lrg_map.find(n1); + uint lr2 = _phc._lrg_map.find(n2); if( lr1 != lr2 && // Different live ranges already AND !_phc._ifg->test_edge_sq( lr1, lr2 ) ) { // Do not interfere LRG *lrg1 = &_phc.lrgs(lr1); @@ -306,14 +158,18 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui // I am about to clobber the dst_name, so the copy must be inserted // after the last use. Last use is really first-use on a backwards scan. uint i = b->end_idx()-1; - while( 1 ) { + while(1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. - if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } uint idx = n->is_Copy(); assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( idx && _phc.Find(n->in(idx)) == dst_name ) break; + if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) { + break; + } i--; } uint last_use_idx = i; @@ -324,24 +180,29 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui // There can be only 1 kill that exits any block and that is // the last kill. Thus it is the first kill on a backwards scan. i = b->end_idx()-1; - while( 1 ) { + while (1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. 
- if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( _phc.Find(n) == src_name ) { + if (_phc._lrg_map.find(n) == src_name) { kill_src_idx = i; break; } i--; } // Need a temp? Last use of dst comes after the kill of src? - if( last_use_idx >= kill_src_idx ) { + if (last_use_idx >= kill_src_idx) { // Need to break a cycle with a temp uint idx = copy->is_Copy(); Node *tmp = copy->clone(); - _phc.new_lrg(tmp,_phc._maxlrg++); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(tmp, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + // Insert new temp between copy and source tmp ->set_req(idx,copy->in(idx)); copy->set_req(idx,tmp); @@ -359,14 +220,14 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // We do LRGs compressing and fix a liveout data only here since the other // place in Split() is guarded by the assert which we never hit. - _phc.compress_uf_map_for_nodes(); + _phc._lrg_map.compress_uf_map_for_nodes(); // Fix block's liveout data for compressed live ranges. - for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) { - uint compressed_lrg = _phc.Find(lrg); - if( lrg != compressed_lrg ) { - for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) { + for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) { + uint compressed_lrg = _phc._lrg_map.find(lrg); + if (lrg != compressed_lrg) { + for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) { IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]); - if( liveout->member(lrg) ) { + if (liveout->member(lrg)) { liveout->remove(lrg); liveout->insert(compressed_lrg); } @@ -392,8 +253,9 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint cidx = copy->is_Copy(); if( cidx ) { Node *def = copy->in(cidx); - if( _phc.Find(copy) == _phc.Find(def) ) - n->set_req(k,def); + if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) { + n->set_req(k, def); + } } } @@ -401,7 +263,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint cidx = n->is_Copy(); if( cidx ) { Node *def = n->in(cidx); - if( _phc.Find(n) == _phc.Find(def) ) { + if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) { n->replace_by(def); n->set_req(cidx,NULL); b->_nodes.remove(l); @@ -410,16 +272,18 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { } } - if( n->is_Phi() ) { + if (n->is_Phi()) { // Get the chosen name for the Phi - uint phi_name = _phc.Find( n ); + uint phi_name = _phc._lrg_map.find(n); // Ignore the pre-allocated specials - if( !phi_name ) continue; + if (!phi_name) { + continue; + } // Check for mismatch inputs to Phi - for( uint j = 1; j in(j); - uint src_name = _phc.Find(m); - if( src_name != phi_name ) { + uint src_name = _phc._lrg_map.find(m); + if (src_name != phi_name) { Block *pred = _phc._cfg._bbs[b->pred(j)->_idx]; Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); @@ -430,18 +294,18 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // Insert the copy in the predecessor basic block pred->add_inst(copy); // Copy any flags as well - _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg ); + _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode(m,*rm,*rm); + copy = new (C) 
MachSpillCopyNode(m, *rm, *rm); // Find a good place to insert. Kinda tricky, use a subroutine insert_copy_with_overlap(pred,copy,phi_name,src_name); } // Insert the copy in the use-def chain - n->set_req( j, copy ); + n->set_req(j, copy); _phc._cfg._bbs.map( copy->_idx, pred ); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, phi_name ); + _phc._lrg_map.extend(copy->_idx, phi_name); } // End of if Phi names do not match } // End of for all inputs to Phi } else { // End of if Phi @@ -450,39 +314,40 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint idx; if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) { // Get the chosen name for the Node - uint name = _phc.Find( n ); - assert( name, "no 2-address specials" ); + uint name = _phc._lrg_map.find(n); + assert (name, "no 2-address specials"); // Check for name mis-match on the 2-address input Node *m = n->in(idx); - if( _phc.Find(m) != name ) { + if (_phc._lrg_map.find(m) != name) { Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); // At this point it is unsafe to extend live ranges (6550579). // Rematerialize only constants as we do for Phi above. - if( m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize() ) { + if(m->is_Mach() && m->as_Mach()->is_Con() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); - if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) ) + b->_nodes.insert(l++, copy); + if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) { l++; + } } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode( m, *rm, *rm ); + copy = new (C) MachSpillCopyNode(m, *rm, *rm); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); + b->_nodes.insert(l++, copy); } // Insert the copy in the use-def chain - n->set_req(idx, copy ); + n->set_req(idx, copy); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, name ); + _phc._lrg_map.extend(copy->_idx, name); _phc._cfg._bbs.map( copy->_idx, b ); } } // End of is two-adr // Insert a copy at a debug use for a lrg which has high frequency - if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) { + if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) { // Walk the debug inputs to the node and check for lrg freq JVMState* jvms = n->jvms(); uint debug_start = jvms ? jvms->debug_start() : 999999; @@ -490,9 +355,11 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) { // Do not split monitors; they are only needed for debug table // entries and need no code. - if( jvms->is_monitor_use(inpidx) ) continue; + if (jvms->is_monitor_use(inpidx)) { + continue; + } Node *inp = n->in(inpidx); - uint nidx = _phc.n2lidx(inp); + uint nidx = _phc._lrg_map.live_range_id(inp); LRG &lrg = lrgs(nidx); // If this lrg has a high frequency use/def @@ -519,8 +386,10 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // Insert the copy in the basic block, just before us b->_nodes.insert( l++, copy ); // Extend ("register allocate") the names array for the copy. 
- _phc.new_lrg( copy, _phc._maxlrg++ ); - _phc._cfg._bbs.map( copy->_idx, b ); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(copy, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + _phc._cfg._bbs.map(copy->_idx, b); //tty->print_cr("Split a debug use in Aggressive Coalesce"); } // End of if high frequency use/def } // End of for all debug inputs @@ -583,17 +452,17 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) { uint idx; // 2-address instructions have a virtual Copy matching their input // to their output - if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) { + if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) { MachNode *mach = n->as_Mach(); - combine_these_two( mach, mach->in(idx) ); + combine_these_two(mach, mach->in(idx)); } } // End of for all instructions in block } //============================================================================= //------------------------------PhaseConservativeCoalesce---------------------- -PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) { - _ulr.initialize(_phc._maxlrg); +PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) { + _ulr.initialize(_phc._lrg_map.max_lrg_id()); } //------------------------------verify----------------------------------------- @@ -673,10 +542,14 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, // Else work back one in copy chain prev_copy = prev_copy->in(prev_copy->is_Copy()); } else { // Else collect interferences - uint lidx = _phc.Find(x); + uint lidx = _phc._lrg_map.find(x); // Found another def of live-range being stretched? - if( lidx == lr1 ) return max_juint; - if( lidx == lr2 ) return max_juint; + if(lidx == lr1) { + return max_juint; + } + if(lidx == lr2) { + return max_juint; + } // If we attempt to coalesce across a bound def if( lrgs(lidx).is_bound() ) { @@ -751,33 +624,43 @@ static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) { // See if I can coalesce a series of multiple copies together. I need the // final dest copy and the original src copy. They can be the same Node. // Compute the compatible register masks. -bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) { +bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) { - if( !dst_copy->is_SpillCopy() ) return false; - if( !src_copy->is_SpillCopy() ) return false; + if (!dst_copy->is_SpillCopy()) { + return false; + } + if (!src_copy->is_SpillCopy()) { + return false; + } Node *src_def = src_copy->in(src_copy->is_Copy()); - uint lr1 = _phc.Find(dst_copy); - uint lr2 = _phc.Find(src_def ); + uint lr1 = _phc._lrg_map.find(dst_copy); + uint lr2 = _phc._lrg_map.find(src_def); // Same live ranges already? - if( lr1 == lr2 ) return false; + if (lr1 == lr2) { + return false; + } // Interfere? - if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false; + if (_phc._ifg->test_edge_sq(lr1, lr2)) { + return false; + } // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK. - if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast + if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast return false; + } // Coalescing between an aligned live range and a mis-aligned live range? // No, no! Alignment changes how we count degree. 
- if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj ) + if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) { return false; + } // Sort; use smaller live-range number Node *lr1_node = dst_copy; Node *lr2_node = src_def; - if( lr1 > lr2 ) { + if (lr1 > lr2) { uint tmp = lr1; lr1 = lr2; lr2 = tmp; lr1_node = src_def; lr2_node = dst_copy; } @@ -916,17 +799,5 @@ void PhaseConservativeCoalesce::coalesce( Block *b ) { PhaseChaitin::_conserv_coalesce++; // Collect stats on success continue; } - - /* do not attempt pairs. About 1/2 of all pairs can be removed by - post-alloc. The other set are too few to bother. - Node *copy2 = copy1->in(idx1); - uint idx2 = copy2->is_Copy(); - if( !idx2 ) continue; - if( copy_copy(copy1,copy2,b,i) ) { - i--; // Retry, same location in block - PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success - continue; - } - */ } } diff --git a/hotspot/src/share/vm/opto/coalesce.hpp b/hotspot/src/share/vm/opto/coalesce.hpp index 904ce7f679f..a6359af101c 100644 --- a/hotspot/src/share/vm/opto/coalesce.hpp +++ b/hotspot/src/share/vm/opto/coalesce.hpp @@ -41,23 +41,25 @@ protected: public: // Coalesce copies - PhaseCoalesce( PhaseChaitin &chaitin ) : Phase(Coalesce), _phc(chaitin) { } + PhaseCoalesce(PhaseChaitin &phc) + : Phase(Coalesce) + , _phc(phc) {} virtual void verify() = 0; // Coalesce copies - void coalesce_driver( ); + void coalesce_driver(); // Coalesce copies in this block - virtual void coalesce( Block *b ) = 0; + virtual void coalesce(Block *b) = 0; // Attempt to coalesce live ranges defined by these 2 - void combine_these_two( Node *n1, Node *n2 ); + void combine_these_two(Node *n1, Node *n2); - LRG &lrgs( uint lidx ) { return _phc.lrgs(lidx); } + LRG &lrgs(uint lidx) { return _phc.lrgs(lidx); } #ifndef PRODUCT // Dump internally name - void dump( Node *n ) const; + void dump(Node *n) const; // Dump whole shebang void dump() const; #endif diff --git a/hotspot/src/share/vm/opto/compile.cpp b/hotspot/src/share/vm/opto/compile.cpp index 37b4b4cd5c6..dd32b77f7b7 100644 --- a/hotspot/src/share/vm/opto/compile.cpp +++ b/hotspot/src/share/vm/opto/compile.cpp @@ -2127,22 +2127,19 @@ void Compile::Code_Gen() { } NOT_PRODUCT( verify_graph_edges(); ) - PhaseChaitin regalloc(unique(),cfg,m); + PhaseChaitin regalloc(unique(), cfg, m); _regalloc = &regalloc; { TracePhase t2("regalloc", &_t_registerAllocation, true); - // Perform any platform dependent preallocation actions. This is used, - // for example, to avoid taking an implicit null pointer exception - // using the frame pointer on win95. - _regalloc->pd_preallocate_hook(); - // Perform register allocation. After Chaitin, use-def chains are // no longer accurate (at spill code) and so must be ignored. // Node->LRG->reg mappings are still accurate. _regalloc->Register_Allocate(); // Bail out if the allocator builds too many nodes - if (failing()) return; + if (failing()) { + return; + } } // Prior to register allocation we kept empty basic blocks in case the @@ -2160,9 +2157,6 @@ void Compile::Code_Gen() { cfg.fixup_flow(); } - // Perform any platform dependent postallocation verifications.
- debug_only( _regalloc->pd_postallocate_verify_hook(); ) - // Apply peephole optimizations if( OptoPeephole ) { NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); ) diff --git a/hotspot/src/share/vm/opto/graphKit.cpp b/hotspot/src/share/vm/opto/graphKit.cpp index 028020f51de..e2285916ce5 100644 --- a/hotspot/src/share/vm/opto/graphKit.cpp +++ b/hotspot/src/share/vm/opto/graphKit.cpp @@ -3564,7 +3564,8 @@ void GraphKit::g1_write_barrier_pre(bool do_load, Node* no_ctrl = NULL; Node* no_base = __ top(); - Node* zero = __ ConI(0); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); float likely = PROB_LIKELY(0.999); float unlikely = PROB_UNLIKELY(0.999); @@ -3590,7 +3591,9 @@ void GraphKit::g1_write_barrier_pre(bool do_load, // if (!marking) __ if_then(marking, BoolTest::ne, zero); { - Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + BasicType index_bt = TypeX_X->basic_type(); + assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size."); + Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw); if (do_load) { // load original value @@ -3603,22 +3606,16 @@ void GraphKit::g1_write_barrier_pre(bool do_load, Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); // is the queue for this thread full? - __ if_then(index, BoolTest::ne, zero, likely); { + __ if_then(index, BoolTest::ne, zeroX, likely); { // decrement the index - Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); - Node* next_indexX = next_index; -#ifdef _LP64 - // We could refine the type for what it's worth - // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); - next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); -#endif + Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); // Now get the buffer location we will log the previous value into and store it - Node *log_addr = __ AddP(no_base, buffer, next_indexX); + Node *log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw); // update the index - __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw); } __ else_(); { @@ -3645,26 +3642,21 @@ void GraphKit::g1_mark_card(IdealKit& ideal, Node* buffer, const TypeFunc* tf) { - Node* zero = __ ConI(0); + Node* zero = __ ConI(0); + Node* zeroX = __ ConX(0); Node* no_base = __ top(); BasicType card_bt = T_BYTE; // Smash zero into card. 
MUST BE ORDERED WRT TO STORE __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw); // Now do the queue work - __ if_then(index, BoolTest::ne, zero); { + __ if_then(index, BoolTest::ne, zeroX); { - Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); - Node* next_indexX = next_index; -#ifdef _LP64 - // We could refine the type for what it's worth - // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); - next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); -#endif // _LP64 - Node* log_addr = __ AddP(no_base, buffer, next_indexX); + Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); + Node* log_addr = __ AddP(no_base, buffer, next_index); __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw); - __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); + __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw); } __ else_(); { __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread()); @@ -3725,7 +3717,7 @@ void GraphKit::g1_write_barrier_post(Node* oop_store, // Now some values // Use ctrl to avoid hoisting these values past a safepoint, which could // potentially reset these fields in the JavaThread. - Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); + Node* index = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw); Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); // Convert the store obj pointer to an int prior to doing math on it diff --git a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp index 1f811b8f4b8..e6909054324 100644 --- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp +++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp @@ -616,7 +616,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { buffer[0] = 0; _chaitin->dump_register(node, buffer); print_prop("reg", buffer); - print_prop("lrg", _chaitin->n2lidx(node)); + print_prop("lrg", _chaitin->_lrg_map.live_range_id(node)); } node->_in_dump_cnt--; diff --git a/hotspot/src/share/vm/opto/ifg.cpp b/hotspot/src/share/vm/opto/ifg.cpp index c40265214dc..96c0957cffb 100644 --- a/hotspot/src/share/vm/opto/ifg.cpp +++ b/hotspot/src/share/vm/opto/ifg.cpp @@ -286,15 +286,14 @@ void PhaseIFG::verify( const PhaseChaitin *pc ) const { uint idx; uint last = 0; while ((idx = elements.next()) != 0) { - assert( idx != i, "Must have empty diagonal"); - assert( pc->Find_const(idx) == idx, "Must not need Find" ); - assert( _adjs[idx].member(i), "IFG not square" ); - assert( !(*_yanked)[idx], "No yanked neighbors" ); - assert( last < idx, "not sorted increasing"); + assert(idx != i, "Must have empty diagonal"); + assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find"); + assert(_adjs[idx].member(i), "IFG not square"); + assert(!(*_yanked)[idx], "No yanked neighbors"); + assert(last < idx, "not sorted increasing"); last = idx; } - assert( !lrgs(i)._degree_valid || - effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" ); + assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong"); } } #endif @@ -342,10 +341,10 @@ void PhaseChaitin::build_ifg_virtual( ) { Node *n = b->_nodes[j-1]; // Get value being 
defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if (r) { // Remove from live-out set liveout->remove(r); @@ -353,16 +352,19 @@ void PhaseChaitin::build_ifg_virtual( ) { // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) liveout->remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout->remove(_lrg_map.live_range_id(n->in(idx))); + } // Interfere with everything live - interfere_with_live( r, liveout ); + interfere_with_live(r, liveout); } // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) - liveout->insert( n2lidx(n->in(k)) ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for(uint k = 1; k < n->req(); k++) { + liveout->insert(_lrg_map.live_range_id(n->in(k))); + } } // 2-address instructions always have the defined value live @@ -394,11 +396,12 @@ void PhaseChaitin::build_ifg_virtual( ) { n->set_req( 2, tmp ); } // Defined value interferes with all inputs - uint lidx = n2lidx(n->in(idx)); - for( uint k = 1; k < n->req(); k++ ) { - uint kidx = n2lidx(n->in(k)); - if( kidx != lidx ) - _ifg->add_edge( r, kidx ); + uint lidx = _lrg_map.live_range_id(n->in(idx)); + for (uint k = 1; k < n->req(); k++) { + uint kidx = _lrg_map.live_range_id(n->in(k)); + if (kidx != lidx) { + _ifg->add_edge(r, kidx); + } } } } // End of forall instructions in block @@ -542,10 +545,10 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { Node *n = b->_nodes[j - 1]; // Get value being defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if(r) { // A DEF normally costs block frequency; rematerialized values are // removed from the DEF sight, so LOWER costs here. lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq; @@ -556,9 +559,11 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { Node *def = n->in(0); if( !n->is_Proj() || // Could also be a flags-projection of a dead ADD or such. - (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) { + (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) { b->_nodes.remove(j - 1); - if( lrgs(r)._def == n ) lrgs(r)._def = 0; + if (lrgs(r)._def == n) { + lrgs(r)._def = 0; + } n->disconnect_inputs(NULL, C); _cfg._bbs.map(n->_idx,NULL); n->replace_by(C->top()); @@ -570,7 +575,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // Fat-projections kill many registers which cannot be used to // hold live ranges. - if( lrgs(r)._fat_proj ) { + if (lrgs(r)._fat_proj) { // Count the int-only registers RegMask itmp = lrgs(r).mask(); itmp.AND(*Matcher::idealreg2regmask[Op_RegI]); @@ -636,12 +641,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) { - uint x = n2lidx(n->in(idx)); - if( liveout.remove( x ) ) { + if (idx) { + uint x = _lrg_map.live_range_id(n->in(idx)); + if (liveout.remove(x)) { lrgs(x)._area -= cost; // Adjust register pressure. 
- lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index ); + lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index); assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } @@ -727,18 +732,21 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // the flags and assumes it's dead. This keeps the (useless) // flag-setting behavior alive while also keeping the (useful) // memory update effect. - for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) { + for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) { Node *def = n->in(k); - uint x = n2lidx(def); - if( !x ) continue; + uint x = _lrg_map.live_range_id(def); + if (!x) { + continue; + } LRG &lrg = lrgs(x); // No use-side cost for spilling debug info - if( k < debug_start ) + if (k < debug_start) { // A USE costs twice block frequency (once for the Load, once // for a Load-delay). Rematerialized uses only cost once. lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq)); + } // It is live now - if( liveout.insert( x ) ) { + if (liveout.insert(x)) { // Newly live things assumed live from here to top of block lrg._area += cost; // Adjust register pressure diff --git a/hotspot/src/share/vm/opto/live.cpp b/hotspot/src/share/vm/opto/live.cpp index 5da41891583..773dd1ea2e6 100644 --- a/hotspot/src/share/vm/opto/live.cpp +++ b/hotspot/src/share/vm/opto/live.cpp @@ -44,7 +44,7 @@ // block is put on the worklist. // The locally live-in stuff is computed once and added to predecessor // live-out sets. This separate compilation is done in the outer loop below. -PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { +PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { } void PhaseLive::compute(uint maxlrg) { diff --git a/hotspot/src/share/vm/opto/live.hpp b/hotspot/src/share/vm/opto/live.hpp index 8a266c19067..c2ebe758cf8 100644 --- a/hotspot/src/share/vm/opto/live.hpp +++ b/hotspot/src/share/vm/opto/live.hpp @@ -80,7 +80,7 @@ class PhaseLive : public Phase { Block_List *_worklist; // Worklist for iterative solution const PhaseCFG &_cfg; // Basic blocks - LRG_List &_names; // Mapping from Nodes to live ranges + const LRG_List &_names; // Mapping from Nodes to live ranges uint _maxlrg; // Largest live-range number Arena *_arena; @@ -91,7 +91,7 @@ class PhaseLive : public Phase { void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ); public: - PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ); + PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena); ~PhaseLive() {} // Compute liveness info void compute(uint maxlrg); diff --git a/hotspot/src/share/vm/opto/output.cpp b/hotspot/src/share/vm/opto/output.cpp index 178f3b717eb..f5a1e08e169 100644 --- a/hotspot/src/share/vm/opto/output.cpp +++ b/hotspot/src/share/vm/opto/output.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" +#include "code/compiledIC.hpp" #include "code/debugInfo.hpp" #include "code/debugInfoRec.hpp" #include "compiler/compileBroker.hpp" @@ -41,8 +42,6 @@ #include "runtime/handles.inline.hpp" #include "utilities/xmlstream.hpp" -extern uint size_java_to_interp(); -extern uint reloc_java_to_interp(); extern uint size_exception_handler(); extern uint size_deopt_handler(); @@ -389,15 +388,15 @@ void 
Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size MachNode *mach = nj->as_Mach(); blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding reloc_size += mach->reloc(); - if( mach->is_MachCall() ) { + if (mach->is_MachCall()) { MachCallNode *mcall = mach->as_MachCall(); // This destination address is NOT PC-relative mcall->method_set((intptr_t)mcall->entry_point()); - if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) { - stub_size += size_java_to_interp(); - reloc_size += reloc_java_to_interp(); + if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) { + stub_size += CompiledStaticCall::to_interp_stub_size(); + reloc_size += CompiledStaticCall::reloc_to_interp_stub(); } } else if (mach->is_MachSafePoint()) { // If call/safepoint are adjacent, account for possible @@ -1044,21 +1043,6 @@ void NonSafepointEmitter::emit_non_safepoint() { debug_info->end_non_safepoint(pc_offset); } - - -// helper for fill_buffer bailout logic -static void turn_off_compiler(Compile* C) { - if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) { - // Do not turn off compilation if a single giant method has - // blown the code cache size. - C->record_failure("excessive request to CodeCache"); - } else { - // Let CompilerBroker disable further compilations. - C->record_failure("CodeCache is full"); - } -} - - //------------------------------init_buffer------------------------------------ CodeBuffer* Compile::init_buffer(uint* blk_starts) { @@ -1158,7 +1142,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) { // Have we run out of code space? if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return NULL; } // Configure the code buffer. @@ -1476,7 +1460,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // Verify that there is sufficient space remaining cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } @@ -1633,7 +1617,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // One last check for failed CodeBuffer::expand: if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } diff --git a/hotspot/src/share/vm/opto/parseHelper.cpp b/hotspot/src/share/vm/opto/parseHelper.cpp index f2a1bd2bef4..e9486088dc9 100644 --- a/hotspot/src/share/vm/opto/parseHelper.cpp +++ b/hotspot/src/share/vm/opto/parseHelper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -337,19 +337,21 @@ void Parse::increment_and_test_invocation_counter(int limit) { if (!count_invocations()) return; // Get the Method* node. - const TypePtr* adr_type = TypeMetadataPtr::make(method()); - Node *method_node = makecon(adr_type); + ciMethod* m = method(); + address counters_adr = m->ensure_method_counters(); - // Load the interpreter_invocation_counter from the Method*. 
- int offset = Method::interpreter_invocation_counter_offset_in_bytes(); - Node* adr_node = basic_plus_adr(method_node, method_node, offset); - Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type); + Node* ctrl = control(); + const TypePtr* adr_type = TypeRawPtr::make(counters_adr); + Node *counters_node = makecon(adr_type); + Node* adr_iic_node = basic_plus_adr(counters_node, counters_node, + MethodCounters::interpreter_invocation_counter_offset_in_bytes()); + Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type); test_counter_against_threshold(cnt, limit); // Add one to the counter and store Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1))); - store_to_memory( NULL, adr_node, incr, T_INT, adr_type ); + store_to_memory( ctrl, adr_iic_node, incr, T_INT, adr_type ); } //----------------------------method_data_addressing--------------------------- diff --git a/hotspot/src/share/vm/opto/postaloc.cpp b/hotspot/src/share/vm/opto/postaloc.cpp index a27145b5c7b..c1b3fdbd231 100644 --- a/hotspot/src/share/vm/opto/postaloc.cpp +++ b/hotspot/src/share/vm/opto/postaloc.cpp @@ -56,7 +56,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const { int i; for( i=0; i < limit; i++ ) { if( def->is_Proj() && def->in(0)->is_Start() && - _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) ) + _matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg())) return true; // Direct use of callee-save proj if( def->is_Copy() ) // Copies carry value through def = def->in(def->is_Copy()); @@ -83,7 +83,7 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_ // Count 1 if deleting an instruction from the current block if( oldb == current_block ) blk_adjust++; _cfg._bbs.map(old->_idx,NULL); - OptoReg::Name old_reg = lrgs(n2lidx(old)).reg(); + OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg(); if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available? value->map(old_reg,NULL); // Yank from value/regnd maps regnd->map(old_reg,NULL); // This register's value is now unknown @@ -164,7 +164,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre // Not every pair of physical registers are assignment compatible, // e.g. on sparc floating point registers are not assignable to integer // registers. - const LRG &def_lrg = lrgs(n2lidx(def)); + const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def)); OptoReg::Name def_reg = def_lrg.reg(); const RegMask &use_mask = n->in_RegMask(idx); bool can_use = ( RegMask::can_represent(def_reg) ? 
                   (use_mask.Member(def_reg) != 0)
@@ -209,11 +209,12 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre
 // Skip through any number of copies (that don't mod oop-i-ness)
 Node *PhaseChaitin::skip_copies( Node *c ) {
   int idx = c->is_Copy();
-  uint is_oop = lrgs(n2lidx(c))._is_oop;
+  uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop;
   while (idx != 0) {
     guarantee(c->in(idx) != NULL, "must not resurrect dead copy");
-    if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop)
+    if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) {
       break; // casting copy, not the same value
+    }
     c = c->in(idx);
     idx = c->is_Copy();
   }
@@ -225,8 +226,8 @@ Node *PhaseChaitin::skip_copies( Node *c ) {

 int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ) {
   int blk_adjust = 0;
-  uint nk_idx = n2lidx(n->in(k));
-  OptoReg::Name nk_reg = lrgs(nk_idx ).reg();
+  uint nk_idx = _lrg_map.live_range_id(n->in(k));
+  OptoReg::Name nk_reg = lrgs(nk_idx).reg();

   // Remove obvious same-register copies
   Node *x = n->in(k);
@@ -234,9 +235,13 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
   while( (idx=x->is_Copy()) != 0 ) {
     Node *copy = x->in(idx);
     guarantee(copy != NULL, "must not resurrect dead copy");
-    if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
+    if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) {
+      break;
+    }
     blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
-    if( n->in(k) != copy ) break; // Failed for some cutout?
+    if (n->in(k) != copy) {
+      break; // Failed for some cutout?
+    }
     x = copy; // Progress, try again
   }
@@ -256,7 +261,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
   if (val == x && nk_idx != 0 &&
       regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
-      n2lidx(x) == n2lidx(regnd[nk_reg])) {
+      _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
     // When rematerialzing nodes and stretching lifetimes, the
     // allocator will reuse the original def for multidef LRG instead
     // of the current reaching def because it can't know it's safe to
@@ -270,7 +275,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
   if (val == x) return blk_adjust; // No progress?

   int n_regs = RegMask::num_registers(val->ideal_reg());
-  uint val_idx = n2lidx(val);
+  uint val_idx = _lrg_map.live_range_id(val);
   OptoReg::Name val_reg = lrgs(val_idx).reg();

   // See if it happens to already be in the correct register!
@@ -499,12 +504,12 @@ void PhaseChaitin::post_allocate_copy_removal() {
     for( j = 1; j < phi_dex; j++ ) {
       uint k;
       Node *phi = b->_nodes[j];
-      uint pidx = n2lidx(phi);
-      OptoReg::Name preg = lrgs(n2lidx(phi)).reg();
+      uint pidx = _lrg_map.live_range_id(phi);
+      OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();

       // Remove copies remaining on edges. Check for junk phi.
       Node *u = NULL;
-      for( k=1; k<phi->req(); k++ ) {
+      for (k = 1; k < phi->req(); k++) {
         Node *x = phi->in(k);
         if( phi != x && u != x ) // Found a different input
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
@@ -555,10 +560,10 @@ void PhaseChaitin::post_allocate_copy_removal() {
       // alive and well at the use (or else the allocator fubar'd). Take
       // advantage of this info to set a reaching def for the use-reg.
uint k; - for( k = 1; k < n->req(); k++ ) { + for (k = 1; k < n->req(); k++) { Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE guarantee(def != NULL, "no disconnected nodes at this point"); - uint useidx = n2lidx(def); // useidx is the live range index for this USE + uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE if( useidx ) { OptoReg::Name ureg = lrgs(useidx).reg(); @@ -566,7 +571,7 @@ void PhaseChaitin::post_allocate_copy_removal() { int idx; // Skip occasional useless copy while( (idx=def->is_Copy()) != 0 && def->in(idx) != NULL && // NULL should not happen - ureg == lrgs(n2lidx(def->in(idx))).reg() ) + ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg()) def = def->in(idx); Node *valdef = skip_copies(def); // tighten up val through non-useless copies value.map(ureg,valdef); // record improved reaching-def info @@ -594,8 +599,10 @@ void PhaseChaitin::post_allocate_copy_removal() { j -= elide_copy( n, k, b, value, regnd, two_adr!=k ); // Unallocated Nodes define no registers - uint lidx = n2lidx(n); - if( !lidx ) continue; + uint lidx = _lrg_map.live_range_id(n); + if (!lidx) { + continue; + } // Update the register defined by this instruction OptoReg::Name nreg = lrgs(lidx).reg(); diff --git a/hotspot/src/share/vm/opto/reg_split.cpp b/hotspot/src/share/vm/opto/reg_split.cpp index 1695dee6008..edd614987ea 100644 --- a/hotspot/src/share/vm/opto/reg_split.cpp +++ b/hotspot/src/share/vm/opto/reg_split.cpp @@ -318,9 +318,13 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint for( uint i = 1; i < def->req(); i++ ) { Node *in = def->in(i); // Check for single-def (LRG cannot redefined) - uint lidx = n2lidx(in); - if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy - if (lrgs(lidx).is_singledef()) continue; + uint lidx = _lrg_map.live_range_id(in); + if (lidx >= _lrg_map.max_lrg_id()) { + continue; // Value is a recent spill-copy + } + if (lrgs(lidx).is_singledef()) { + continue; + } Block *b_def = _cfg._bbs[def->_idx]; int idx_def = b_def->find_node(def); @@ -344,26 +348,28 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint if( spill->req() > 1 ) { for( uint i = 1; i < spill->req(); i++ ) { Node *in = spill->in(i); - uint lidx = Find_id(in); + uint lidx = _lrg_map.find_id(in); // Walk backwards thru spill copy node intermediates if (walkThru) { - while ( in->is_SpillCopy() && lidx >= _maxlrg ) { + while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) { in = in->in(1); - lidx = Find_id(in); + lidx = _lrg_map.find_id(in); } - if (lidx < _maxlrg && lrgs(lidx).is_multidef()) { + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) { // walkThru found a multidef LRG, which is unsafe to use, so // just keep the original def used in the clone. 
         in = spill->in(i);
-        lidx = Find_id(in);
+        lidx = _lrg_map.find_id(in);
       }
     }
-    if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
+    if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
       Node *rdef = Reachblock[lrg2reach[lidx]];
-      if( rdef ) spill->set_req(i,rdef);
+      if (rdef) {
+        spill->set_req(i, rdef);
+      }
     }
   }
 }
@@ -382,7 +388,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
 #endif
   // See if the cloned def kills any flags, and copy those kills as well
   uint i = insidx+1;
-  if( clone_projs( b, i, def, spill, maxlrg ) ) {
+  if( clone_projs( b, i, def, spill, maxlrg) ) {
     // Adjust the point where we go hi-pressure
     if( i <= b->_ihrp_index ) b->_ihrp_index++;
     if( i <= b->_fhrp_index ) b->_fhrp_index++;
@@ -424,17 +430,25 @@ bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
 //------------------------------prompt_use---------------------------------
 // True if lidx is used before any real register is def'd in the block
 bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
-  if( lrgs(lidx)._was_spilled2 ) return false;
+  if (lrgs(lidx)._was_spilled2) {
+    return false;
+  }
   // Scan block for 1st use.
   for( uint i = 1; i <= b->end_idx(); i++ ) {
     Node *n = b->_nodes[i];
     // Ignore PHI use, these can be up or down
-    if( n->is_Phi() ) continue;
-    for( uint j = 1; j < n->req(); j++ )
-      if( Find_id(n->in(j)) == lidx )
+    if (n->is_Phi()) {
+      continue;
+    }
+    for (uint j = 1; j < n->req(); j++) {
+      if (_lrg_map.find_id(n->in(j)) == lidx) {
         return true; // Found 1st use!
-    if( n->out_RegMask().is_NotEmpty() ) return false;
+      }
+    }
+    if (n->out_RegMask().is_NotEmpty()) {
+      return false;
+    }
   }
   return false;
 }
@@ -464,23 +478,23 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
   bool u1, u2, u3;
   Block *b, *pred;
   PhiNode *phi;
-  GrowableArray<uint> lidxs(split_arena, _maxlrg, 0, 0);
+  GrowableArray<uint> lidxs(split_arena, maxlrg, 0, 0);

   // Array of counters to count splits per live range
-  GrowableArray<uint> splits(split_arena, _maxlrg, 0, 0);
+  GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);

 #define NEW_SPLIT_ARRAY(type, size)\
   (type*) split_arena->allocate_bytes((size) * sizeof(type))

   //----------Setup Code----------
   // Create a convenient mapping from lrg numbers to reaches/leaves indices
-  uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg );
+  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
   // Keep track of DEFS & Phis for later passes
   defs = new Node_List();
   phis = new Node_List();
   // Gather info on which LRG's are spilling, and build maps
-  for( bidx = 1; bidx < _maxlrg; bidx++ ) {
-    if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
+  for (bidx = 1; bidx < maxlrg; bidx++) {
+    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
       assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
       lrg2reach[bidx] = spill_cnt;
       spill_cnt++;
@@ -629,7 +643,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
           break;
         }
         // must be looking at a phi
-        if( Find_id(n1) == lidxs.at(slidx) ) {
+        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
           // found the necessary phi
           needs_phi = false;
           has_phi = true;
@@ -651,11 +665,11 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
           Reachblock[slidx] = phi;

           // add node to block & node_to_block mapping
-          insert_proj( b, insidx++, phi, maxlrg++ );
+          insert_proj(b, insidx++, phi, maxlrg++);
           non_phi++;
           // Reset new phi's mapping to be the spilling live range
-          _names.map(phi->_idx, lidx);
-          assert(Find_id(phi) == lidx,"Bad update on
Union-Find mapping"); + _lrg_map.map(phi->_idx, lidx); + assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping"); } // end if not found correct phi // Here you have either found or created the Phi, so record it assert(phi != NULL,"Must have a Phi Node here"); @@ -721,12 +735,12 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { Node *n = b->_nodes[insidx]; // Find the defining Node's live range index - uint defidx = Find_id(n); + uint defidx = _lrg_map.find_id(n); uint cnt = n->req(); - if( n->is_Phi() ) { + if (n->is_Phi()) { // Skip phi nodes after removing dead copies. - if( defidx < _maxlrg ) { + if (defidx < _lrg_map.max_lrg_id()) { // Check for useless Phis. These appear if we spill, then // coalesce away copies. Dont touch Phis in spilling live // ranges; they are busy getting modifed in this pass. @@ -744,8 +758,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } } assert( u, "at least 1 valid input expected" ); - if( i >= cnt ) { // Found one unique input - assert(Find_id(n) == Find_id(u), "should be the same lrg"); + if (i >= cnt) { // Found one unique input + assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg"); n->replace_by(u); // Then replace with unique input n->disconnect_inputs(NULL, C); b->_nodes.remove(insidx); @@ -793,16 +807,24 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { while( insert_point > 0 ) { Node *n = b->_nodes[insert_point]; // Hit top of block? Quit going backwards - if( n->is_Phi() ) break; + if (n->is_Phi()) { + break; + } // Found a def? Better split after it. - if( n2lidx(n) == lidx ) break; + if (_lrg_map.live_range_id(n) == lidx) { + break; + } // Look for a use uint i; - for( i = 1; i < n->req(); i++ ) - if( n2lidx(n->in(i)) == lidx ) + for( i = 1; i < n->req(); i++ ) { + if (_lrg_map.live_range_id(n->in(i)) == lidx) { break; + } + } // Found a use? Better split after it. - if( i < n->req() ) break; + if (i < n->req()) { + break; + } insert_point--; } uint orig_eidx = b->end_idx(); @@ -812,8 +834,9 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { return 0; } // Spill of NULL check mem op goes into the following block. - if (b->end_idx() > orig_eidx) + if (b->end_idx() > orig_eidx) { insidx++; + } } // This is a new DEF, so update UP UPblock[slidx] = false; @@ -832,13 +855,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } // end if crossing HRP Boundry // If the LRG index is oob, then this is a new spillcopy, skip it. - if( defidx >= _maxlrg ) { + if (defidx >= _lrg_map.max_lrg_id()) { continue; } LRG &deflrg = lrgs(defidx); uint copyidx = n->is_Copy(); // Remove coalesced copy from CFG - if( copyidx && defidx == n2lidx(n->in(copyidx)) ) { + if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { n->replace_by( n->in(copyidx) ); n->set_req( copyidx, NULL ); b->_nodes.remove(insidx--); @@ -864,13 +887,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // If inpidx > old_last, then one of these new inputs is being // handled. Skip the derived part of the pair, but process // the base like any other input. 
- if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) { + if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) { continue; // skip derived_debug added below } // Get lidx of input - uint useidx = Find_id(n->in(inpidx)); + uint useidx = _lrg_map.find_id(n->in(inpidx)); // Not a brand-new split, and it is a spill use - if( useidx < _maxlrg && lrgs(useidx).reg() >= LRG::SPILL_REG ) { + if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) { // Check for valid reaching DEF slidx = lrg2reach[useidx]; Node *def = Reachblock[slidx]; @@ -886,7 +909,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { return 0; } - _names.extend(def->_idx,0); + _lrg_map.extend(def->_idx, 0); _cfg._bbs.map(def->_idx,b); n->set_req(inpidx, def); continue; @@ -1186,10 +1209,10 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // ********** Split Left Over Mem-Mem Moves ********** // Check for mem-mem copies and split them now. Do not do this // to copies about to be spilled; they will be Split shortly. - if( copyidx ) { + if (copyidx) { Node *use = n->in(copyidx); - uint useidx = Find_id(use); - if( useidx < _maxlrg && // This is not a new split + uint useidx = _lrg_map.find_id(use); + if (useidx < _lrg_map.max_lrg_id() && // This is not a new split OptoReg::is_stack(deflrg.reg()) && deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack LRG &uselrg = lrgs(useidx); @@ -1228,7 +1251,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { uint member; IndexSetIterator isi(liveout); while ((member = isi.next()) != 0) { - assert(defidx != Find_const(member), "Live out member has not been compressed"); + assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed"); } #endif Reachblock[slidx] = NULL; @@ -1261,7 +1284,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { assert(phi->is_Phi(),"This list must only contain Phi Nodes"); Block *b = _cfg._bbs[phi->_idx]; // Grab the live range number - uint lidx = Find_id(phi); + uint lidx = _lrg_map.find_id(phi); uint slidx = lrg2reach[lidx]; // Update node to lidx map new_lrg(phi, maxlrg++); @@ -1296,11 +1319,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { int insert = pred->end_idx(); while (insert >= 1 && pred->_nodes[insert - 1]->is_SpillCopy() && - Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { + _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { insert--; } - def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false ); - if( !def ) return 0; // Bail out + def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false); + if (!def) { + return 0; // Bail out + } } // Update the Phi's input edge array phi->set_req(i,def); @@ -1316,7 +1341,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } // End for all inputs to the Phi } // End for all Phi Nodes // Update _maxlrg to save Union asserts - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); //----------PASS 3---------- @@ -1328,47 +1353,51 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { for( uint i = 1; i < phi->req(); i++ ) { // Grab the input node Node *n = phi->in(i); - assert( n, "" ); - uint lidx = Find(n); - uint pidx = Find(phi); - if( lidx < pidx ) + assert(n, "node should exist"); + uint lidx = _lrg_map.find(n); + uint pidx = 
_lrg_map.find(phi); + if (lidx < pidx) { Union(n, phi); - else if( lidx > pidx ) + } + else if(lidx > pidx) { Union(phi, n); + } } // End for all inputs to the Phi Node } // End for all Phi Nodes // Now union all two address instructions - for( insidx = 0; insidx < defs->size(); insidx++ ) { + for (insidx = 0; insidx < defs->size(); insidx++) { // Grab the def n1 = defs->at(insidx); // Set new lidx for DEF & handle 2-addr instructions - if( n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0) ) { - assert( Find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); + if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) { + assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); // Union the input and output live ranges - uint lr1 = Find(n1); - uint lr2 = Find(n1->in(twoidx)); - if( lr1 < lr2 ) + uint lr1 = _lrg_map.find(n1); + uint lr2 = _lrg_map.find(n1->in(twoidx)); + if (lr1 < lr2) { Union(n1, n1->in(twoidx)); - else if( lr1 > lr2 ) + } + else if (lr1 > lr2) { Union(n1->in(twoidx), n1); + } } // End if two address } // End for all defs // DEBUG #ifdef ASSERT // Validate all live range index assignments - for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) { + for (bidx = 0; bidx < _cfg._num_blocks; bidx++) { b = _cfg._blocks[bidx]; - for( insidx = 0; insidx <= b->end_idx(); insidx++ ) { + for (insidx = 0; insidx <= b->end_idx(); insidx++) { Node *n = b->_nodes[insidx]; - uint defidx = Find(n); - assert(defidx < _maxlrg,"Bad live range index in Split"); + uint defidx = _lrg_map.find(n); + assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); assert(defidx < maxlrg,"Bad live range index in Split"); } } // Issue a warning if splitting made no progress int noprogress = 0; - for( slidx = 0; slidx < spill_cnt; slidx++ ) { - if( PrintOpto && WizardMode && splits.at(slidx) == 0 ) { + for (slidx = 0; slidx < spill_cnt; slidx++) { + if (PrintOpto && WizardMode && splits.at(slidx) == 0) { tty->print_cr("Failed to split live range %d", lidxs.at(slidx)); //BREAKPOINT; } diff --git a/hotspot/src/share/vm/opto/regalloc.hpp b/hotspot/src/share/vm/opto/regalloc.hpp index d0a993e57c4..9bea94be5de 100644 --- a/hotspot/src/share/vm/opto/regalloc.hpp +++ b/hotspot/src/share/vm/opto/regalloc.hpp @@ -113,7 +113,7 @@ public: OptoReg::Name offset2reg( int stk_offset ) const; // Get the register encoding associated with the Node - int get_encode( const Node *n ) const { + int get_encode(const Node *n) const { assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array"); OptoReg::Name first = _node_regs[n->_idx].first(); OptoReg::Name second = _node_regs[n->_idx].second(); @@ -122,15 +122,6 @@ public: return Matcher::_regEncode[first]; } - // Platform dependent hook for actions prior to allocation - void pd_preallocate_hook(); - -#ifdef ASSERT - // Platform dependent hook for verification after allocation. Will - // only get called when compiling with asserts. - void pd_postallocate_verify_hook(); -#endif - #ifndef PRODUCT static int _total_framesize; static int _max_framesize; diff --git a/hotspot/src/share/vm/prims/jvmtiEnv.cpp b/hotspot/src/share/vm/prims/jvmtiEnv.cpp index 318fe4e0b7e..f24c80f4339 100644 --- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp +++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp @@ -259,7 +259,8 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { // bytes to the InstanceKlass here because they have not been // validated and we're not at a safepoint. 
constantPoolHandle constants(current_thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it + oop cplock = constants->lock(); + ObjectLocker ol(cplock, current_thread, cplock != NULL); // lock constant pool while we query it JvmtiClassFileReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { @@ -2417,7 +2418,8 @@ JvmtiEnv::GetConstantPool(oop k_mirror, jint* constant_pool_count_ptr, jint* con instanceKlassHandle ikh(thread, k_oop); constantPoolHandle constants(thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it + oop cplock = constants->lock(); + ObjectLocker ol(cplock, thread, cplock != NULL); // lock constant pool while we query it JvmtiConstantPoolReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { diff --git a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp index 9eaa07a097c..4e8e8c0e097 100644 --- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp +++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp @@ -415,20 +415,26 @@ void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp, // this is an indirect CP entry so it needs special handling case JVM_CONSTANT_InvokeDynamic: { - // TBD: cross-checks and possible extra appends into CP and bsm operands - // are needed as well. This issue is tracked by a separate bug 8007037. - int bss_idx = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i); - - int ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i); - int new_ref_i = find_or_append_indirect_entry(scratch_cp, ref_i, merge_cp_p, + // Index of the bootstrap specifier in the operands array + int old_bs_i = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i); + int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p, + merge_cp_length_p, THREAD); + // The bootstrap method NameAndType_info index + int old_ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i); + int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p, merge_cp_length_p, THREAD); - if (new_ref_i != ref_i) { + if (new_bs_i != old_bs_i) { RC_TRACE(0x00080000, - ("InvokeDynamic entry@%d name_and_type ref_index change: %d to %d", - *merge_cp_length_p, ref_i, new_ref_i)); + ("InvokeDynamic entry@%d bootstrap_method_attr_index change: %d to %d", + *merge_cp_length_p, old_bs_i, new_bs_i)); + } + if (new_ref_i != old_ref_i) { + RC_TRACE(0x00080000, + ("InvokeDynamic entry@%d name_and_type_index change: %d to %d", + *merge_cp_length_p, old_ref_i, new_ref_i)); } - (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, bss_idx, new_ref_i); + (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i); if (scratch_i != *merge_cp_length_p) { // The new entry in *merge_cp_p is at a different index than // the new entry in scratch_cp so we need to map the index values. @@ -492,6 +498,105 @@ int VM_RedefineClasses::find_or_append_indirect_entry(constantPoolHandle scratch } // end find_or_append_indirect_entry() +// Append a bootstrap specifier into the merge_cp operands that is semantically equal +// to the scratch_cp operands bootstrap specifier passed by the old_bs_i index. +// Recursively append new merge_cp entries referenced by the new bootstrap specifier. 
+void VM_RedefineClasses::append_operand(constantPoolHandle scratch_cp, int old_bs_i,
+       constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
+
+  int old_ref_i = scratch_cp->operand_bootstrap_method_ref_index_at(old_bs_i);
+  int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
+                                                merge_cp_length_p, THREAD);
+  if (new_ref_i != old_ref_i) {
+    RC_TRACE(0x00080000,
+             ("operands entry@%d bootstrap method ref_index change: %d to %d",
+              _operands_cur_length, old_ref_i, new_ref_i));
+  }
+
+  Array<u2>* merge_ops = (*merge_cp_p)->operands();
+  int new_bs_i = _operands_cur_length;
+  // We have _operands_cur_length == 0 when the merge_cp operands is empty yet.
+  // However, the operand_offset_at(0) was set in the extend_operands() call.
+  int new_base = (new_bs_i == 0) ? (*merge_cp_p)->operand_offset_at(0)
+                                 : (*merge_cp_p)->operand_next_offset_at(new_bs_i - 1);
+  int argc = scratch_cp->operand_argument_count_at(old_bs_i);
+
+  ConstantPool::operand_offset_at_put(merge_ops, _operands_cur_length, new_base);
+  merge_ops->at_put(new_base++, new_ref_i);
+  merge_ops->at_put(new_base++, argc);
+
+  for (int i = 0; i < argc; i++) {
+    int old_arg_ref_i = scratch_cp->operand_argument_index_at(old_bs_i, i);
+    int new_arg_ref_i = find_or_append_indirect_entry(scratch_cp, old_arg_ref_i, merge_cp_p,
+                                                      merge_cp_length_p, THREAD);
+    merge_ops->at_put(new_base++, new_arg_ref_i);
+    if (new_arg_ref_i != old_arg_ref_i) {
+      RC_TRACE(0x00080000,
+               ("operands entry@%d bootstrap method argument ref_index change: %d to %d",
+                _operands_cur_length, old_arg_ref_i, new_arg_ref_i));
+    }
+  }
+  if (old_bs_i != _operands_cur_length) {
+    // The bootstrap specifier in *merge_cp_p is at a different index than
+    // that in scratch_cp so we need to map the index values.
+ map_operand_index(old_bs_i, new_bs_i); + } + _operands_cur_length++; +} // end append_operand() + + +int VM_RedefineClasses::find_or_append_operand(constantPoolHandle scratch_cp, + int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) { + + int new_bs_i = old_bs_i; // bootstrap specifier index + bool match = (old_bs_i < _operands_cur_length) && + scratch_cp->compare_operand_to(old_bs_i, *merge_cp_p, old_bs_i, THREAD); + + if (!match) { + // forward reference in *merge_cp_p or not a direct match + int found_i = scratch_cp->find_matching_operand(old_bs_i, *merge_cp_p, + _operands_cur_length, THREAD); + if (found_i != -1) { + guarantee(found_i != old_bs_i, "compare_operand_to() and find_matching_operand() disagree"); + // found a matching operand somewhere else in *merge_cp_p so just need a mapping + new_bs_i = found_i; + map_operand_index(old_bs_i, found_i); + } else { + // no match found so we have to append this bootstrap specifier to *merge_cp_p + append_operand(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p, THREAD); + new_bs_i = _operands_cur_length - 1; + } + } + return new_bs_i; +} // end find_or_append_operand() + + +void VM_RedefineClasses::finalize_operands_merge(constantPoolHandle merge_cp, TRAPS) { + if (merge_cp->operands() == NULL) { + return; + } + // Shrink the merge_cp operands + merge_cp->shrink_operands(_operands_cur_length, CHECK); + + if (RC_TRACE_ENABLED(0x00040000)) { + // don't want to loop unless we are tracing + int count = 0; + for (int i = 1; i < _operands_index_map_p->length(); i++) { + int value = _operands_index_map_p->at(i); + if (value != -1) { + RC_TRACE_WITH_THREAD(0x00040000, THREAD, + ("operands_index_map[%d]: old=%d new=%d", count, i, value)); + count++; + } + } + } + // Clean-up + _operands_index_map_p = NULL; + _operands_cur_length = 0; + _operands_index_map_count = 0; +} // end finalize_operands_merge() + + jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( instanceKlassHandle the_class, instanceKlassHandle scratch_class) { @@ -765,6 +870,31 @@ int VM_RedefineClasses::find_new_index(int old_index) { } // end find_new_index() +// Find new bootstrap specifier index value for old bootstrap specifier index +// value by seaching the index map. Returns unused index (-1) if there is +// no mapped value for the old bootstrap specifier index. +int VM_RedefineClasses::find_new_operand_index(int old_index) { + if (_operands_index_map_count == 0) { + // map is empty so nothing can be found + return -1; + } + + if (old_index == -1 || old_index >= _operands_index_map_p->length()) { + // The old_index is out of range so it is not mapped. + // This should not happen in regular constant pool merging use. + return -1; + } + + int value = _operands_index_map_p->at(old_index); + if (value == -1) { + // the old_index is not mapped + return -1; + } + + return value; +} // end find_new_operand_index() + + // Returns true if the current mismatch is due to a resolved/unresolved // class pair. Otherwise, returns false. bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, @@ -1014,6 +1144,25 @@ void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, } // end map_index() +// Map old_index to new_index as needed. 
+void VM_RedefineClasses::map_operand_index(int old_index, int new_index) { + if (find_new_operand_index(old_index) != -1) { + // old_index is already mapped + return; + } + + if (old_index == new_index) { + // no mapping is needed + return; + } + + _operands_index_map_p->at_put(old_index, new_index); + _operands_index_map_count++; + + RC_TRACE(0x00040000, ("mapped bootstrap specifier at index %d to %d", old_index, new_index)); +} // end map_index() + + // Merge old_cp and scratch_cp and return the results of the merge via // merge_cp_p. The number of entries in *merge_cp_p is returned via // merge_cp_length_p. The entries in old_cp occupy the same locations @@ -1086,6 +1235,7 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, } // end for each old_cp entry ConstantPool::copy_operands(old_cp, *merge_cp_p, CHECK_0); + (*merge_cp_p)->extend_operands(scratch_cp, CHECK_0); // We don't need to sanity check that *merge_cp_length_p is within // *merge_cp_p bounds since we have the minimum on-entry check above. @@ -1198,6 +1348,8 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, CHECK_0); } + finalize_operands_merge(*merge_cp_p, THREAD); + RC_TRACE_WITH_THREAD(0x00020000, THREAD, ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", *merge_cp_length_p, scratch_i, _index_map_count)); @@ -1270,6 +1422,11 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( _index_map_count = 0; _index_map_p = new intArray(scratch_cp->length(), -1); + _operands_cur_length = ConstantPool::operand_array_length(old_cp->operands()); + _operands_index_map_count = 0; + _operands_index_map_p = new intArray( + ConstantPool::operand_array_length(scratch_cp->operands()), -1); + // reference to the cp holder is needed for copy_operands() merge_cp->set_pool_holder(scratch_class()); bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, @@ -1400,7 +1557,6 @@ bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class, return true; } // end rewrite_cp_refs() - // Rewrite constant pool references in the methods. bool VM_RedefineClasses::rewrite_cp_refs_in_methods( instanceKlassHandle scratch_class, TRAPS) { @@ -3284,6 +3440,16 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass, // that reference methods of the evolved class. SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); + // JSR-292 support + MemberNameTable* mnt = the_class->member_names(); + if (mnt != NULL) { + bool trace_name_printed = false; + mnt->adjust_method_entries(_matching_old_methods, + _matching_new_methods, + _matching_methods_length, + &trace_name_printed); + } + // Fix Resolution Error table also to remove old constant pools SystemDictionary::delete_resolution_error(old_constants); diff --git a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp index d2967482541..ffe9a7ed83b 100644 --- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp +++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.hpp @@ -359,8 +359,15 @@ class VM_RedefineClasses: public VM_Operation { // _index_map_p contains any entries. int _index_map_count; intArray * _index_map_p; + + // _operands_index_map_count is just an optimization for knowing if + // _operands_index_map_p contains any entries. 
+ int _operands_cur_length; + int _operands_index_map_count; + intArray * _operands_index_map_p; + // ptr to _class_count scratch_classes - Klass** _scratch_classes; + Klass** _scratch_classes; jvmtiError _res; // Performance measurement support. These timers do not cover all @@ -422,12 +429,19 @@ class VM_RedefineClasses: public VM_Operation { // Support for constant pool merging (these routines are in alpha order): void append_entry(constantPoolHandle scratch_cp, int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + void append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index, + constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + void finalize_operands_merge(constantPoolHandle merge_cp, TRAPS); int find_or_append_indirect_entry(constantPoolHandle scratch_cp, int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); + int find_or_append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index, + constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); int find_new_index(int old_index); + int find_new_operand_index(int old_bootstrap_spec_index); bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, constantPoolHandle cp2, int index2); void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); + void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index); bool merge_constant_pools(constantPoolHandle old_cp, constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); diff --git a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp index 5d72029d264..99e4af5aa37 100644 --- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp +++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp @@ -153,7 +153,8 @@ class JvmtiTagHashmap : public CHeapObj { size_t s = initial_size * sizeof(JvmtiTagHashmapEntry*); _table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal); if (_table == NULL) { - vm_exit_out_of_memory(s, "unable to allocate initial hashtable for jvmti object tags"); + vm_exit_out_of_memory(s, OOM_MALLOC_ERROR, + "unable to allocate initial hashtable for jvmti object tags"); } for (int i=0; i allocate_instance(THREAD)); } -oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { +oop MethodHandles::init_MemberName(Handle mname, Handle target) { + Thread* thread = Thread::current(); + oop target_oop = target(); Klass* target_klass = target_oop->klass(); if (target_klass == SystemDictionary::reflect_Field_klass()) { oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder() @@ -132,24 +136,24 @@ oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { int mods = java_lang_reflect_Field::modifiers(target_oop); oop type = java_lang_reflect_Field::type(target_oop); oop name = java_lang_reflect_Field::name(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); - intptr_t offset = InstanceKlass::cast(k)->field_offset(slot); - return init_field_MemberName(mname_oop, k, accessFlags_from(mods), type, name, offset); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + intptr_t offset = InstanceKlass::cast(k())->field_offset(slot); + return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset); } else if (target_klass == SystemDictionary::reflect_Method_klass()) { oop clazz = java_lang_reflect_Method::clazz(target_oop); int slot = java_lang_reflect_Method::slot(target_oop); - Klass* k = 
java_lang_Class::as_Klass(clazz); - if (k != NULL && k->oop_is_instance()) { - Method* m = InstanceKlass::cast(k)->method_with_idnum(slot); - return init_method_MemberName(mname_oop, m, true, k); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + if (!k.is_null() && k->oop_is_instance()) { + Method* m = InstanceKlass::cast(k())->method_with_idnum(slot); + return init_method_MemberName(mname, m, true, k); } } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) { oop clazz = java_lang_reflect_Constructor::clazz(target_oop); int slot = java_lang_reflect_Constructor::slot(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); - if (k != NULL && k->oop_is_instance()) { - Method* m = InstanceKlass::cast(k)->method_with_idnum(slot); - return init_method_MemberName(mname_oop, m, false, k); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); + if (!k.is_null() && k->oop_is_instance()) { + Method* m = InstanceKlass::cast(k())->method_with_idnum(slot); + return init_method_MemberName(mname, m, false, k); } } else if (target_klass == SystemDictionary::MemberName_klass()) { // Note: This only works if the MemberName has already been resolved. @@ -157,17 +161,18 @@ oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { int flags = java_lang_invoke_MemberName::flags(target_oop); Metadata* vmtarget=java_lang_invoke_MemberName::vmtarget(target_oop); intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop); - Klass* k = java_lang_Class::as_Klass(clazz); + KlassHandle k(thread, java_lang_Class::as_Klass(clazz)); int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; if (vmtarget == NULL) return NULL; // not resolved if ((flags & IS_FIELD) != 0) { assert(vmtarget->is_klass(), "field vmtarget is Klass*"); int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0); // FIXME: how does k (receiver_limit) contribute? - return init_field_MemberName(mname_oop, (Klass*)vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex); + KlassHandle k_vmtarget(thread, (Klass*)vmtarget); + return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex); } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) { assert(vmtarget->is_method(), "method or constructor vmtarget is Method*"); - return init_method_MemberName(mname_oop, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k); + return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k); } else { return NULL; } @@ -175,8 +180,9 @@ oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { return NULL; } -oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_dispatch, - Klass* receiver_limit) { +oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch, + KlassHandle receiver_limit_h) { + Klass* receiver_limit = receiver_limit_h(); AccessFlags mods = m->access_flags(); int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch @@ -187,6 +193,10 @@ oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_disp flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); } else if (mods.is_static()) { flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT); + // Get vindex from itable if method holder is an interface. 
+ if (m->method_holder()->is_interface()) { + vmindex = klassItable::compute_itable_index(m); + } } else if (receiver_limit != mklass && !receiver_limit->is_subtype_of(mklass)) { return NULL; // bad receiver limit @@ -213,6 +223,7 @@ oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_disp flags |= CALLER_SENSITIVE; } + oop mname_oop = mname(); java_lang_invoke_MemberName::set_flags( mname_oop, flags); java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex); // vtable/itable index @@ -225,10 +236,11 @@ oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_disp // This is done eagerly, since it is readily available without // constructing any new objects. // TO DO: maybe intern mname_oop - return mname_oop; + m->method_holder()->add_member_name(mname); + return mname(); } -Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS) { +Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) { Handle empty; if (info.resolved_appendix().not_null()) { // The resolved MemberName must not be accompanied by an appendix argument, @@ -248,19 +260,20 @@ Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAP } else { vmindex = info.vtable_index(); } - oop res = init_method_MemberName(mname_oop, m(), (vmindex >= 0), defc()); + oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc()); assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), ""); return Handle(THREAD, res); } -oop MethodHandles::init_field_MemberName(oop mname_oop, Klass* field_holder, +oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder, AccessFlags mods, oop type, oop name, intptr_t offset, bool is_setter) { int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ); flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT); if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT); - Metadata* vmtarget = field_holder; + Metadata* vmtarget = field_holder(); int vmindex = offset; // determines the field uniquely when combined with static bit + oop mname_oop = mname(); java_lang_invoke_MemberName::set_flags(mname_oop, flags); java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); @@ -277,10 +290,11 @@ oop MethodHandles::init_field_MemberName(oop mname_oop, Klass* field_holder, // Although the fieldDescriptor::_index would also identify the field, // we do not use it, because it is harder to decode. 
// TO DO: maybe intern mname_oop - return mname_oop; + InstanceKlass::cast(field_holder())->add_member_name(mname); + return mname(); } -Handle MethodHandles::init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS) { +Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) { return Handle(); #if 0 // FIXME KlassHandle field_holder = info.klass(); @@ -679,7 +693,7 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { return empty; } } - return init_method_MemberName(mname(), result, THREAD); + return init_method_MemberName(mname, result, THREAD); } case IS_CONSTRUCTOR: { @@ -697,7 +711,7 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { } } assert(result.is_statically_bound(), ""); - return init_method_MemberName(mname(), result, THREAD); + return init_method_MemberName(mname, result, THREAD); } case IS_FIELD: { @@ -710,7 +724,7 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { oop name = field_name_or_null(fd.name()); bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind)); mname = Handle(THREAD, - init_field_MemberName(mname(), sel_klass(), + init_field_MemberName(mname, sel_klass, fd.access_flags(), type, name, fd.offset(), is_setter)); return mname; } @@ -802,16 +816,15 @@ void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) { THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); } -int MethodHandles::find_MemberNames(Klass* k, +int MethodHandles::find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig, - int mflags, Klass* caller, - int skip, objArrayOop results) { - DEBUG_ONLY(No_Safepoint_Verifier nsv); - // this code contains no safepoints! - + int mflags, KlassHandle caller, + int skip, objArrayHandle results) { // %%% take caller into account! - if (k == NULL || !k->oop_is_instance()) return -1; + Thread* thread = Thread::current(); + + if (k.is_null() || !k->oop_is_instance()) return -1; int rfill = 0, rlimit = results->length(), rskip = skip; // overflow measurement: @@ -839,7 +852,7 @@ int MethodHandles::find_MemberNames(Klass* k, } if ((match_flags & IS_FIELD) != 0) { - for (FieldStream st(k, local_only, !search_intfc); !st.eos(); st.next()) { + for (FieldStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) { if (name != NULL && st.name() != name) continue; if (sig != NULL && st.signature() != sig) @@ -848,15 +861,15 @@ int MethodHandles::find_MemberNames(Klass* k, if (rskip > 0) { --rskip; } else if (rfill < rlimit) { - oop result = results->obj_at(rfill++); - if (!java_lang_invoke_MemberName::is_instance(result)) + Handle result(thread, results->obj_at(rfill++)); + if (!java_lang_invoke_MemberName::is_instance(result())) return -99; // caller bug! 
         oop type = field_signature_type_or_null(st.signature());
         oop name = field_name_or_null(st.name());
-        oop saved = MethodHandles::init_field_MemberName(result, st.klass()(),
+        oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
                                                          st.access_flags(), type, name, st.offset());
-        if (saved != result)
+        if (saved != result())
           results->obj_at_put(rfill-1, saved); // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -889,7 +902,7 @@ int MethodHandles::find_MemberNames(Klass* k,
     } else {
       // caller will accept either sort; no need to adjust name
     }
-    for (MethodStream st(k, local_only, !search_intfc); !st.eos(); st.next()) {
+    for (MethodStream st(k(), local_only, !search_intfc); !st.eos(); st.next()) {
       Method* m = st.method();
       Symbol* m_name = m->name();
       if (m_name == clinit_name)
@@ -902,11 +915,11 @@ int MethodHandles::find_MemberNames(Klass* k,
       if (rskip > 0) {
         --rskip;
       } else if (rfill < rlimit) {
-        oop result = results->obj_at(rfill++);
-        if (!java_lang_invoke_MemberName::is_instance(result))
+        Handle result(thread, results->obj_at(rfill++));
+        if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99; // caller bug!
         oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
-        if (saved != result)
+        if (saved != result())
           results->obj_at_put(rfill-1, saved); // show saved instance to user
       } else if (++overflow >= overflow_limit) {
         match_flags = 0; break; // got tired of looking at overflow
@@ -917,6 +930,99 @@ int MethodHandles::find_MemberNames(Klass* k,
   // return number of elements we at leasted wanted to initialize
   return rfill + overflow;
 }
+
+//------------------------------------------------------------------------------
+// MemberNameTable
+//
+
+MemberNameTable::MemberNameTable() : GrowableArray<jweak>(10, true) {
+  assert_locked_or_safepoint(MemberNameTable_lock);
+}
+
+MemberNameTable::~MemberNameTable() {
+  assert_locked_or_safepoint(MemberNameTable_lock);
+  int len = this->length();
+
+  for (int idx = 0; idx < len; idx++) {
+    jweak ref = this->at(idx);
+    JNIHandles::destroy_weak_global(ref);
+  }
+}
+
+// Return entry index if found, return -1 otherwise.
+int MemberNameTable::find_member_name(oop mem_name) { + assert_locked_or_safepoint(MemberNameTable_lock); + int len = this->length(); + + for (int idx = 0; idx < len; idx++) { + jweak ref = this->at(idx); + oop entry = JNIHandles::resolve(ref); + if (entry == mem_name) { + return idx; + } + } + return -1; +} + +void MemberNameTable::add_member_name(jweak mem_name_wref) { + assert_locked_or_safepoint(MemberNameTable_lock); + oop mem_name = JNIHandles::resolve(mem_name_wref); + + // Each member name may appear just once: add only if not found + if (find_member_name(mem_name) == -1) { + this->append(mem_name_wref); + } +} + +#if INCLUDE_JVMTI +oop MemberNameTable::find_member_name_by_method(Method* old_method) { + assert_locked_or_safepoint(MemberNameTable_lock); + oop found = NULL; + int len = this->length(); + + for (int idx = 0; idx < len; idx++) { + oop mem_name = JNIHandles::resolve(this->at(idx)); + if (mem_name == NULL) { + continue; + } + Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name); + if (method == old_method) { + found = mem_name; + break; + } + } + return found; +} + +// It is called at safepoint only +void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods, + int methods_length, bool *trace_name_printed) { + assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint"); + // search the MemberNameTable for uses of either obsolete or EMCP methods + for (int j = 0; j < methods_length; j++) { + Method* old_method = old_methods[j]; + Method* new_method = new_methods[j]; + oop mem_name = find_member_name_by_method(old_method); + if (mem_name != NULL) { + java_lang_invoke_MemberName::adjust_vmtarget(mem_name, new_method); + + if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { + if (!(*trace_name_printed)) { + // RC_TRACE_MESG macro has an embedded ResourceMark + RC_TRACE_MESG(("adjust: name=%s", + old_method->method_holder()->external_name())); + *trace_name_printed = true; + } + // RC_TRACE macro has an embedded ResourceMark + RC_TRACE(0x00400000, ("MemberName method update: %s(%s)", + new_method->name()->as_C_string(), + new_method->signature()->as_C_string())); + } + } + } +} +#endif // INCLUDE_JVMTI + // // Here are the native methods in java.lang.invoke.MethodHandleNatives // They are the private interface between this JVM and the HotSpot-specific @@ -1010,8 +1116,8 @@ JVM_ENTRY(void, MHN_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobje if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); } if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); - oop target_oop = JNIHandles::resolve_non_null(target_jh); - MethodHandles::init_MemberName(mname(), target_oop); + Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); + MethodHandles::init_MemberName(mname, target); } JVM_END @@ -1118,7 +1224,7 @@ JVM_ENTRY(jobject, MHN_getMemberVMInfo(JNIEnv *env, jobject igcls, jobject mname x = ((Klass*) vmtarget)->java_mirror(); } else if (vmtarget->is_method()) { Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL); - x = MethodHandles::init_method_MemberName(mname2(), (Method*)vmtarget, false, NULL); + x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL); } result->obj_at_put(1, x); return JNIHandles::make_local(env, result()); @@ -1161,8 +1267,8 @@ JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls, // %%% TO DO } - int res = 
                 MethodHandles::find_MemberNames(k(), name, sig, mflags,
-                                                caller(), skip, results());
+  int res = MethodHandles::find_MemberNames(k, name, sig, mflags,
+                                            caller, skip, results);
   // TO DO: expand at least some of the MemberNames, to avoid massive callbacks
   return res;
 }
diff --git a/hotspot/src/share/vm/prims/methodHandles.hpp b/hotspot/src/share/vm/prims/methodHandles.hpp
index 2e45bac1c60..f01a3c9fc1b 100644
--- a/hotspot/src/share/vm/prims/methodHandles.hpp
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp
@@ -54,23 +54,23 @@ class MethodHandles: AllStatic {
   static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS); // must be followed by init_MemberName
-  static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target
-  static oop init_method_MemberName(oop mname_oop, Method* m, bool do_dispatch,
-                                    Klass* receiver_limit);
-  static oop init_field_MemberName(oop mname_oop, Klass* field_holder,
+  static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
+  static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
+                                    KlassHandle receiver_limit_h);
+  static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
                                    AccessFlags mods, oop type, oop name,
                                    intptr_t offset, bool is_setter = false);
-  static Handle init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS);
-  static Handle init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS);
+  static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
+  static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
   static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
-  static int find_MemberNames(Klass* k, Symbol* name, Symbol* sig,
-                              int mflags, Klass* caller,
-                              int skip, objArrayOop results);
+  static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
+                              int mflags, KlassHandle caller,
+                              int skip, objArrayHandle results);
   // bit values for suppress argument to expand_MemberName:
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };

   // Generate MethodHandles adapters.
-  static void generate_adapters();
+  static void generate_adapters(); // Called from MethodHandlesAdapterGenerator.

   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                           vmIntrinsics::ID iid);
@@ -230,4 +230,27 @@ public:
   void generate();
 };

+//------------------------------------------------------------------------------
+// MemberNameTable
+//
+class MemberNameTable : public GrowableArray<jweak> {
+ public:
+  MemberNameTable();
+  ~MemberNameTable();
+  void add_member_name(jweak mem_name_ref);
+ private:
+  int find_member_name(oop mem_name);
+
+#if INCLUDE_JVMTI
+ public:
+  // RedefineClasses() API support:
+  // If a MemberName refers to old_method then update it
+  // to refer to new_method.
+ void adjust_method_entries(Method** old_methods, Method** new_methods, + int methods_length, bool *trace_name_printed); + private: + oop find_member_name_by_method(Method* old_method); +#endif // INCLUDE_JVMTI +}; + #endif // SHARE_VM_PRIMS_METHODHANDLES_HPP diff --git a/hotspot/src/share/vm/prims/whitebox.cpp b/hotspot/src/share/vm/prims/whitebox.cpp index 1dfcd3d39a4..10e738fca41 100644 --- a/hotspot/src/share/vm/prims/whitebox.cpp +++ b/hotspot/src/share/vm/prims/whitebox.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -237,10 +237,10 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth WB_END -WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method)) +WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level)) jmethodID jmid = reflected_method_to_jmid(thread, env, method); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - mh->set_not_compilable(); + mh->set_not_compilable(comp_level, true /* report */, "WhiteBox"); WB_END WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value)) @@ -278,6 +278,7 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method)) methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); MutexLockerEx mu(Compile_lock); MethodData* mdo = mh->method_data(); + MethodCounters* mcs = mh->method_counters(); if (mdo != NULL) { mdo->init(); @@ -288,31 +289,29 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method)) } } - mh->backedge_counter()->init(); - mh->invocation_counter()->init(); - mh->set_interpreter_invocation_count(0); - mh->set_interpreter_throwout_count(0); mh->clear_not_c1_compilable(); mh->clear_not_c2_compilable(); mh->clear_not_c2_osr_compilable(); NOT_PRODUCT(mh->set_compiled_invocation_count(0)); + if (mcs != NULL) { + mcs->backedge_counter()->init(); + mcs->invocation_counter()->init(); + mcs->set_interpreter_invocation_count(0); + mcs->set_interpreter_throwout_count(0); #ifdef TIERED - mh->set_rate(0.0F); - mh->set_prev_event_count(0); - mh->set_prev_time(0); + mcs->set_rate(0.0F); + mh->set_prev_event_count(0, THREAD); + mh->set_prev_time(0, THREAD); #endif + } WB_END WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString)) ResourceMark rm(THREAD); int len; - jchar* name = java_lang_String::as_unicode_string(JNIHandles::resolve(javaString), len); - oop found_string = StringTable::the_table()->lookup(name, len); - if (found_string == NULL) { - return false; - } - return true; + jchar* name = java_lang_String::as_unicode_string(JNIHandles::resolve(javaString), len, CHECK_false); + return (StringTable::lookup(name, len) != NULL); WB_END @@ -321,6 +320,11 @@ WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o)) Universe::heap()->collect(GCCause::_last_ditch_collection); WB_END + +WB_ENTRY(jlong, WB_ReserveMemory(JNIEnv* env, jobject o, jlong size)) + return (jlong)os::reserve_memory(size, NULL, 0); +WB_END + //Some convenience methods to deal with objects from java int WhiteBox::offset_for_field(const char* field_name, oop object, Symbol* signature_symbol) { @@ -398,30 +402,32 @@ static JNINativeMethod methods[] = { {CC"NMTWaitForDataMerge", CC"()Z", 
(void*)&WB_NMTWaitForDataMerge}, #endif // INCLUDE_NMT {CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll }, - {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Method;)I", + {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_DeoptimizeMethod }, - {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Method;)Z", + {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodCompiled }, - {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Method;I)Z", + {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_IsMethodCompilable}, {CC"isMethodQueuedForCompilation", - CC"(Ljava/lang/reflect/Method;)Z", (void*)&WB_IsMethodQueuedForCompilation}, + CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodQueuedForCompilation}, {CC"makeMethodNotCompilable", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_MakeMethodNotCompilable}, + CC"(Ljava/lang/reflect/Executable;I)V", (void*)&WB_MakeMethodNotCompilable}, {CC"testSetDontInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetDontInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetDontInlineMethod}, {CC"getMethodCompilationLevel", - CC"(Ljava/lang/reflect/Method;)I", (void*)&WB_GetMethodCompilationLevel}, + CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodCompilationLevel}, {CC"getCompileQueuesSize", CC"()I", (void*)&WB_GetCompileQueuesSize}, {CC"testSetForceInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetForceInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetForceInlineMethod}, {CC"enqueueMethodForCompilation", - CC"(Ljava/lang/reflect/Method;I)Z", (void*)&WB_EnqueueMethodForCompilation}, + CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_EnqueueMethodForCompilation}, {CC"clearMethodState", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_ClearMethodState}, + CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState}, {CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable }, {CC"fullGC", CC"()V", (void*)&WB_FullGC }, + + {CC"reserveMemory", CC"(J)J", (void*)&WB_ReserveMemory }, }; #undef CC @@ -433,9 +439,29 @@ JVM_ENTRY(void, JVM_RegisterWhiteBoxMethods(JNIEnv* env, jclass wbclass)) instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass()); Handle loader(ikh->class_loader()); if (loader.is_null()) { + ResourceMark rm; ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI - jint result = env->RegisterNatives(wbclass, methods, sizeof(methods)/sizeof(methods[0])); - if (result == 0) { + bool result = true; + // one by one registration natives for exception catching + jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string()); + for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) { + if (env->RegisterNatives(wbclass, methods + i, 1) != 0) { + result = false; + if (env->ExceptionCheck() && env->IsInstanceOf(env->ExceptionOccurred(), exceptionKlass)) { + // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native + // ignoring the exception + tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature); + env->ExceptionClear(); + } else { + // register is failed w/o exception or w/ unexpected exception + tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. 
All methods will be unregistered", methods[i].name, methods[i].signature); + env->UnregisterNatives(wbclass); + break; + } + } + } + + if (result) { WhiteBox::set_used(); } } diff --git a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp index ce5804a6052..81c2b74739d 100644 --- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,10 +74,11 @@ void AdvancedThresholdPolicy::initialize() { // update_rate() is called from select_task() while holding a compile queue lock. void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { + JavaThread* THREAD = JavaThread::current(); if (is_old(m)) { // We don't remove old methods from the queue, // so we can just zero the rate. - m->set_rate(0); + m->set_rate(0, THREAD); return; } @@ -93,13 +94,13 @@ void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { if (delta_s >= TieredRateUpdateMinTime) { // And we must've taken the previous point at least 1ms before. if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond + m->set_prev_time(t, THREAD); + m->set_prev_event_count(event_count, THREAD); + m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond } else if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); + m->set_rate(0, THREAD); } } } diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp index 6db5ff5820d..b2ff4e5f9d2 100644 --- a/hotspot/src/share/vm/runtime/arguments.cpp +++ b/hotspot/src/share/vm/runtime/arguments.cpp @@ -1901,7 +1901,7 @@ bool Arguments::check_vm_args_consistency() { // Divide by bucket size to prevent a large size from causing rollover when // calculating amount of memory needed to be allocated for the String table. - status = status && verify_interval(StringTableSize, defaultStringTableSize, + status = status && verify_interval(StringTableSize, minimumStringTableSize, (max_uintx / StringTable::bucket_size()), "StringTable size"); if (MinHeapFreeRatio > MaxHeapFreeRatio) { @@ -2224,6 +2224,55 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) { return JNI_OK; } +// Checks if name in command-line argument -agent{lib,path}:name[=options] +// represents a valid HPROF of JDWP agent. 
is_path==true denotes that we +// are dealing with -agentpath (case where name is a path), otherwise with +// -agentlib +bool valid_hprof_or_jdwp_agent(char *name, bool is_path) { + char *_name; + const char *_hprof = "hprof", *_jdwp = "jdwp"; + size_t _len_hprof, _len_jdwp, _len_prefix; + + if (is_path) { + if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) { + return false; + } + + _name++; // skip past last path separator + _len_prefix = strlen(JNI_LIB_PREFIX); + + if (strncmp(_name, JNI_LIB_PREFIX, _len_prefix) != 0) { + return false; + } + + _name += _len_prefix; + _len_hprof = strlen(_hprof); + _len_jdwp = strlen(_jdwp); + + if (strncmp(_name, _hprof, _len_hprof) == 0) { + _name += _len_hprof; + } + else if (strncmp(_name, _jdwp, _len_jdwp) == 0) { + _name += _len_jdwp; + } + else { + return false; + } + + if (strcmp(_name, JNI_LIB_SUFFIX) != 0) { + return false; + } + + return true; + } + + if (strcmp(name, _hprof) == 0 || strcmp(name, _jdwp) == 0) { + return true; + } + + return false; +} + jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, @@ -2322,7 +2371,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1); } #if !INCLUDE_JVMTI - if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) { + if (valid_hprof_or_jdwp_agent(name, is_absolute_path)) { jio_fprintf(defaultStream::error_stream(), "Profiling and debugging agents are not supported in this VM\n"); return JNI_ERR; diff --git a/hotspot/src/share/vm/runtime/compilationPolicy.cpp b/hotspot/src/share/vm/runtime/compilationPolicy.cpp index 71fa794cd2f..8fff9586b5b 100644 --- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp +++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -109,6 +109,9 @@ bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) { // Returns true if m is allowed to be compiled bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) { + // allow any levels for WhiteBox + assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level"); + if (m->is_abstract()) return false; if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; @@ -122,7 +125,13 @@ bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) { return false; } if (comp_level == CompLevel_all) { - return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization); + if (TieredCompilation) { + // enough to be compilable at any level for tiered + return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization); + } else { + // must be compilable at available level for non-tiered + return !m->is_not_compilable(CompLevel_highest_tier); + } } else if (is_compile(comp_level)) { return !m->is_not_compilable(comp_level); } @@ -198,8 +207,10 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { // BUT also make sure the method doesn't look like it was never executed. // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). 
- m->invocation_counter()->set_carry(); - m->backedge_counter()->set_carry(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + mcs->invocation_counter()->set_carry(); + mcs->backedge_counter()->set_carry(); assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); } @@ -207,8 +218,10 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { // Delay next back-branch event but pump up invocation counter to triger // whole method compilation. - InvocationCounter* i = m->invocation_counter(); - InvocationCounter* b = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* i = mcs->invocation_counter(); + InvocationCounter* b = mcs->backedge_counter(); // Don't set invocation_counter's value too low otherwise the method will // look like immature (ic < ~5300) which prevents the inlining based on @@ -227,7 +240,10 @@ void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { class CounterDecay : public AllStatic { static jlong _last_timestamp; static void do_method(Method* m) { - m->invocation_counter()->decay(); + MethodCounters* mcs = m->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + } } public: static void decay(); @@ -265,30 +281,44 @@ void NonTieredCompPolicy::do_safepoint_work() { void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { ScopeDesc* sd = trap_scope; + MethodCounters* mcs; + InvocationCounter* c; for (; !sd->is_top(); sd = sd->sender()) { - // Reset ICs of inlined methods, since they can trigger compilations also. - sd->method()->invocation_counter()->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + // Reset ICs of inlined methods, since they can trigger compilations also. + mcs->invocation_counter()->reset(); + } } - InvocationCounter* c = sd->method()->invocation_counter(); - if (is_osr) { - // It was an OSR method, so bump the count higher. - c->set(c->state(), CompileThreshold); - } else { - c->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + c = mcs->invocation_counter(); + if (is_osr) { + // It was an OSR method, so bump the count higher. + c->set(c->state(), CompileThreshold); + } else { + c->reset(); + } + mcs->backedge_counter()->reset(); } - sd->method()->backedge_counter()->reset(); } // This method can be called by any component of the runtime to notify the policy // that it's recommended to delay the complation of this method. 
void NonTieredCompPolicy::delay_compilation(Method* method) { - method->invocation_counter()->decay(); - method->backedge_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + mcs->backedge_counter()->decay(); + } } void NonTieredCompPolicy::disable_compilation(Method* method) { - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); - method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + } } CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) { @@ -371,8 +401,10 @@ nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, i #ifndef PRODUCT void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) { if (TraceInvocationCounterOverflow) { - InvocationCounter* ic = m->invocation_counter(); - InvocationCounter* bc = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); ResourceMark rm; const char* msg = bci == InvocationEntryBci @@ -413,7 +445,7 @@ void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* threa reset_counter_for_invocation_event(m); const char* comment = "count"; - if (is_compilation_enabled() && can_be_compiled(m)) { + if (is_compilation_enabled() && can_be_compiled(m, comp_level)) { nmethod* nm = m->code(); if (nm == NULL ) { CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread); @@ -426,7 +458,7 @@ void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThr const int hot_count = m->backedge_count(); const char* comment = "backedge_count"; - if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) { + if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) { CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) } @@ -444,7 +476,7 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* th reset_counter_for_invocation_event(m); const char* comment = "count"; - if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) { + if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) { ResourceMark rm(thread); frame fr = thread->last_frame(); assert(fr.is_interpreted_frame(), "must be interpreted"); @@ -482,7 +514,7 @@ void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, Java const int hot_count = m->backedge_count(); const char* comment = "backedge_count"; - if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m)) { + if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) { CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) } @@ -577,7 +609,7 @@ RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray 
* stack // If the caller method is too big or something then we do not want to // compile it just to inline a method - if (!can_be_compiled(next_m)) { + if (!can_be_compiled(next_m, CompLevel_any)) { msg = "caller cannot be compiled"; break; } diff --git a/hotspot/src/share/vm/runtime/fprofiler.cpp b/hotspot/src/share/vm/runtime/fprofiler.cpp index ce910b45fca..111c4db5aa3 100644 --- a/hotspot/src/share/vm/runtime/fprofiler.cpp +++ b/hotspot/src/share/vm/runtime/fprofiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -421,7 +421,8 @@ class interpretedNode : public ProfilerNode { void print_method_on(outputStream* st) { ProfilerNode::print_method_on(st); - if (Verbose) method()->invocation_counter()->print_short(); + MethodCounters* mcs = method()->method_counters(); + if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short(); } }; diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp index 011112ce9a5..f927120dcb4 100644 --- a/hotspot/src/share/vm/runtime/globals.hpp +++ b/hotspot/src/share/vm/runtime/globals.hpp @@ -2123,6 +2123,9 @@ class CommandLineFlags { product(intx, PrefetchFieldsAhead, -1, \ "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ \ + diagnostic(bool, VerifySilently, false, \ + "Don't print print the verification progress") \ + \ diagnostic(bool, VerifyDuringStartup, false, \ "Verify memory system before executing any Java code " \ "during VM initialization") \ @@ -3179,6 +3182,9 @@ class CommandLineFlags { product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \ "When less than X space left, start code cache cleaning") \ \ + product(uintx, CodeCacheFlushingFraction, 2, \ + "Fraction of the code cache that is flushed when full") \ + \ /* interpreter debugging */ \ develop(intx, BinarySwitchThreshold, 5, \ "Minimal number of lookupswitch entries for rewriting to binary " \ @@ -3223,8 +3229,9 @@ class CommandLineFlags { develop(bool, ReplayCompiles, false, \ "Enable replay of compilations from ReplayDataFile") \ \ - develop(ccstr, ReplayDataFile, "replay.txt", \ - "file containing compilation replay information") \ + product(ccstr, ReplayDataFile, NULL, \ + "File containing compilation replay information" \ + "[default: ./replay_pid%p.log] (%p replaced with pid)") \ \ develop(intx, ReplaySuppressInitializers, 2, \ "Controls handling of class initialization during replay" \ @@ -3237,8 +3244,8 @@ class CommandLineFlags { develop(bool, ReplayIgnoreInitErrors, false, \ "Ignore exceptions thrown during initialization for replay") \ \ - develop(bool, DumpReplayDataOnError, true, \ - "record replay data for crashing compiler threads") \ + product(bool, DumpReplayDataOnError, true, \ + "Record replay data for crashing compiler threads") \ \ product(bool, CICompilerCountPerCPU, false, \ "1 compiler thread for log(N CPUs)") \ @@ -3247,7 +3254,9 @@ class CommandLineFlags { "Fire OutOfMemoryErrors throughout CI for testing the compiler " \ "(non-negative value throws OOM after this many CI accesses " \ "in each compile)") \ - \ + notproduct(intx, CICrashAt, -1, \ + "id of compilation to trigger assert in compiler thread for " \ + "the purpose of testing, e.g. 
generation of replay data") \ notproduct(bool, CIObjectFactoryVerify, false, \ "enable potentially expensive verification in ciObjectFactory") \ \ diff --git a/hotspot/src/share/vm/runtime/mutexLocker.cpp b/hotspot/src/share/vm/runtime/mutexLocker.cpp index 31fe66f385c..c386ae8f4e8 100644 --- a/hotspot/src/share/vm/runtime/mutexLocker.cpp +++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp @@ -46,6 +46,7 @@ Mutex* VMStatistic_lock = NULL; Mutex* JNIGlobalHandle_lock = NULL; Mutex* JNIHandleBlockFreeList_lock = NULL; Mutex* JNICachedItableIndex_lock = NULL; +Mutex* MemberNameTable_lock = NULL; Mutex* JmethodIdCreation_lock = NULL; Mutex* JfieldIdCreation_lock = NULL; Monitor* JNICritical_lock = NULL; @@ -252,6 +253,7 @@ void mutex_init() { def(Heap_lock , Monitor, nonleaf+1, false); def(JfieldIdCreation_lock , Mutex , nonleaf+1, true ); // jfieldID, Used in VM_Operation def(JNICachedItableIndex_lock , Mutex , nonleaf+1, false); // Used to cache an itable index during JNI invoke + def(MemberNameTable_lock , Mutex , nonleaf+1, false); // Used to protect MemberNameTable def(CompiledIC_lock , Mutex , nonleaf+2, false); // locks VtableStubs_lock, InlineCacheBuffer_lock def(CompileTaskAlloc_lock , Mutex , nonleaf+2, true ); diff --git a/hotspot/src/share/vm/runtime/mutexLocker.hpp b/hotspot/src/share/vm/runtime/mutexLocker.hpp index 7fae11b6467..7a2e240bd4f 100644 --- a/hotspot/src/share/vm/runtime/mutexLocker.hpp +++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp @@ -51,6 +51,7 @@ extern Mutex* VMStatistic_lock; // a lock used to guard statist extern Mutex* JNIGlobalHandle_lock; // a lock on creating JNI global handles extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list extern Mutex* JNICachedItableIndex_lock; // a lock on caching an itable index during JNI invoke +extern Mutex* MemberNameTable_lock; // a lock on the MemberNameTable updates extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in diff --git a/hotspot/src/share/vm/runtime/objectMonitor.cpp b/hotspot/src/share/vm/runtime/objectMonitor.cpp index d6d2f1c3173..27b6243a503 100644 --- a/hotspot/src/share/vm/runtime/objectMonitor.cpp +++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp @@ -2404,7 +2404,7 @@ void ObjectMonitor::DeferredInitialize () { size_t sz = strlen (SyncKnobs) ; char * knobs = (char *) malloc (sz + 2) ; if (knobs == NULL) { - vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ; + vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ; guarantee (0, "invariant") ; } strcpy (knobs, SyncKnobs) ; diff --git a/hotspot/src/share/vm/runtime/os.hpp b/hotspot/src/share/vm/runtime/os.hpp index 82f45df60b8..2ea7b8b7546 100644 --- a/hotspot/src/share/vm/runtime/os.hpp +++ b/hotspot/src/share/vm/runtime/os.hpp @@ -454,6 +454,7 @@ class os: AllStatic { // File i/o operations static const int default_file_open_flags(); static int open(const char *path, int oflag, int mode); + static FILE* open(int fd, const char* mode); static int close(int fd); static jlong lseek(int fd, jlong offset, int whence); static char* native_path(char *path); @@ -477,7 +478,7 @@ class os: AllStatic { static const char* dll_file_extension(); static const char* get_temp_directory(); - static const char* get_current_directory(char *buf, int buflen); + static const char* 
get_current_directory(char *buf, size_t buflen); // Builds a platform-specific full library path given a ld path and lib name // Returns true if buffer contains full path to existing file, false otherwise diff --git a/hotspot/src/share/vm/runtime/sharedRuntime.cpp b/hotspot/src/share/vm/runtime/sharedRuntime.cpp index 09a0b65b958..57af1f3bda7 100644 --- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp +++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp @@ -1316,12 +1316,6 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread)) assert(stub_frame.is_runtime_frame(), "sanity check"); frame caller_frame = stub_frame.sender(®_map); - // MethodHandle invokes don't have a CompiledIC and should always - // simply redispatch to the callee_target. - address sender_pc = caller_frame.pc(); - CodeBlob* sender_cb = caller_frame.cb(); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame()) { Method* callee = thread->callee_target(); diff --git a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp index fbb0f89c380..ad4c27cf29e 100644 --- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -153,8 +153,11 @@ void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { // Set carry flags on the counters if necessary void SimpleThresholdPolicy::handle_counter_overflow(Method* method) { - set_carry_if_necessary(method->invocation_counter()); - set_carry_if_necessary(method->backedge_counter()); + MethodCounters *mcs = method->method_counters(); + if (mcs != NULL) { + set_carry_if_necessary(mcs->invocation_counter()); + set_carry_if_necessary(mcs->backedge_counter()); + } MethodData* mdo = method->method_data(); if (mdo != NULL) { set_carry_if_necessary(mdo->invocation_counter()); diff --git a/hotspot/src/share/vm/runtime/stubRoutines.cpp b/hotspot/src/share/vm/runtime/stubRoutines.cpp index 98d428abdab..c559865b821 100644 --- a/hotspot/src/share/vm/runtime/stubRoutines.cpp +++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp @@ -147,7 +147,7 @@ void StubRoutines::initialize1() { TraceTime timer("StubRoutines generation 1", TraceStartupTime); _code1 = BufferBlob::create("StubRoutines (1)", code_size1); if (_code1 == NULL) { - vm_exit_out_of_memory(code_size1, "CodeCache: no room for StubRoutines (1)"); + vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)"); } CodeBuffer buffer(_code1); StubGenerator_generate(&buffer, false); @@ -199,7 +199,7 @@ void StubRoutines::initialize2() { TraceTime timer("StubRoutines generation 2", TraceStartupTime); _code2 = BufferBlob::create("StubRoutines (2)", code_size2); if (_code2 == NULL) { - vm_exit_out_of_memory(code_size2, "CodeCache: no room for StubRoutines (2)"); + vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)"); } CodeBuffer buffer(_code2); StubGenerator_generate(&buffer, true); diff --git a/hotspot/src/share/vm/runtime/sweeper.cpp b/hotspot/src/share/vm/runtime/sweeper.cpp index 7c516dfe951..2921b2544b7 100644 --- 
a/hotspot/src/share/vm/runtime/sweeper.cpp +++ b/hotspot/src/share/vm/runtime/sweeper.cpp @@ -136,13 +136,12 @@ volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progre jint NMethodSweeper::_locked_seen = 0; jint NMethodSweeper::_not_entrant_seen_on_stack = 0; -bool NMethodSweeper::_rescan = false; -bool NMethodSweeper::_do_sweep = false; -bool NMethodSweeper::_was_full = false; -jint NMethodSweeper::_advise_to_sweep = 0; -jlong NMethodSweeper::_last_was_full = 0; -uint NMethodSweeper::_highest_marked = 0; -long NMethodSweeper::_was_full_traversal = 0; +bool NMethodSweeper::_resweep = false; +jint NMethodSweeper::_flush_token = 0; +jlong NMethodSweeper::_last_full_flush_time = 0; +int NMethodSweeper::_highest_marked = 0; +int NMethodSweeper::_dead_compile_ids = 0; +long NMethodSweeper::_last_flush_traversal_id = 0; class MarkActivationClosure: public CodeBlobClosure { public: @@ -155,20 +154,16 @@ public: }; static MarkActivationClosure mark_activation_closure; +bool NMethodSweeper::sweep_in_progress() { + return (_current != NULL); +} + void NMethodSweeper::scan_stacks() { assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); if (!MethodFlushing) return; - _do_sweep = true; // No need to synchronize access, since this is always executed at a - // safepoint. If we aren't in the middle of scan and a rescan - // hasn't been requested then just return. If UseCodeCacheFlushing is on and - // code cache flushing is in progress, don't skip sweeping to help make progress - // clearing space in the code cache. - if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) { - _do_sweep = false; - return; - } + // safepoint. // Make sure CompiledIC_lock in unlocked, since we might update some // inline caches. If it is, we just bail-out and try later. @@ -176,7 +171,7 @@ void NMethodSweeper::scan_stacks() { // Check for restart assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); - if (_current == NULL) { + if (!sweep_in_progress() && _resweep) { _seen = 0; _invocations = NmethodSweepFraction; _current = CodeCache::first_nmethod(); @@ -187,39 +182,30 @@ void NMethodSweeper::scan_stacks() { Threads::nmethods_do(&mark_activation_closure); // reset the flags since we started a scan from the beginning. - _rescan = false; + _resweep = false; _locked_seen = 0; _not_entrant_seen_on_stack = 0; } if (UseCodeCacheFlushing) { - if (!CodeCache::needs_flushing()) { - // scan_stacks() runs during a safepoint, no race with setters - _advise_to_sweep = 0; + // only allow new flushes after the interval is complete. 
+ jlong now = os::javaTimeMillis(); + jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; + jlong curr_interval = now - _last_full_flush_time; + if (curr_interval > max_interval) { + _flush_token = 0; } - if (was_full()) { - // There was some progress so attempt to restart the compiler - jlong now = os::javaTimeMillis(); - jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; - jlong curr_interval = now - _last_was_full; - if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) { - CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); - set_was_full(false); - - // Update the _last_was_full time so we can tell how fast the - // code cache is filling up - _last_was_full = os::javaTimeMillis(); - - log_sweep("restart_compiler"); - } + if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) { + CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); + log_sweep("restart_compiler"); } } } void NMethodSweeper::possibly_sweep() { assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); - if ((!MethodFlushing) || (!_do_sweep)) return; + if (!MethodFlushing || !sweep_in_progress()) return; if (_invocations > 0) { // Only one thread at a time will sweep @@ -253,6 +239,14 @@ void NMethodSweeper::sweep_code_cache() { tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); } + if (!CompileBroker::should_compile_new_jobs()) { + // If we have turned off compilations we might as well do full sweeps + // in order to reach the clean state faster. Otherwise the sleeping compiler + // threads will slow down sweeping. After a few iterations the cache + // will be clean and sweeping stops (_resweep will not be set) + _invocations = 1; + } + // We want to visit all nmethods after NmethodSweepFraction // invocations so divide the remaining number of nmethods by the // remaining number of invocations. This is only an estimate since @@ -296,7 +290,7 @@ void NMethodSweeper::sweep_code_cache() { assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache"); - if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) { + if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) { // we've completed a scan without making progress but there were // nmethods we were unable to process either because they were // locked or were still on stack. We don't have to aggresively @@ -318,6 +312,13 @@ void NMethodSweeper::sweep_code_cache() { if (_invocations == 1) { log_sweep("finished"); } + + // Sweeper is the only case where memory is released, + // check here if it is time to restart the compiler. 
+ if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) { + CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); + log_sweep("restart_compiler"); + } } class NMethodMarker: public StackObj { @@ -392,7 +393,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); } nm->mark_for_reclamation(); - _rescan = true; + _resweep = true; SWEEP(nm); } } else if (nm->is_not_entrant()) { @@ -403,7 +404,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); } nm->make_zombie(); - _rescan = true; + _resweep = true; SWEEP(nm); } else { // Still alive, clean up its inline caches @@ -425,16 +426,15 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { release_nmethod(nm); } else { nm->make_zombie(); - _rescan = true; + _resweep = true; SWEEP(nm); } } else { assert(nm->is_alive(), "should be alive"); if (UseCodeCacheFlushing) { - if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) && - (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) && - CodeCache::needs_flushing()) { + if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() && + (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) { // This method has not been called since the forced cleanup happened nm->make_not_entrant(); } @@ -457,41 +457,27 @@ void NMethodSweeper::process_nmethod(nmethod *nm) { // _code field is restored and the Method*/nmethod // go back to their normal state. void NMethodSweeper::handle_full_code_cache(bool is_full) { - // Only the first one to notice can advise us to start early cleaning - if (!is_full){ - jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 ); - if (old != 0) { - return; - } - } if (is_full) { // Since code cache is full, immediately stop new compiles - bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); - if (!did_set) { - // only the first to notice can start the cleaning, - // others will go back and block - return; + if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { + log_sweep("disable_compiler"); } - set_was_full(true); + } - // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up - jlong now = os::javaTimeMillis(); - jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; - jlong curr_interval = now - _last_was_full; - if (curr_interval < max_interval) { - _rescan = true; - log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'", - curr_interval/1000); - return; - } + // Make sure only one thread can flush + // The token is reset after CodeCacheMinimumFlushInterval in scan stacks, + // no need to check the timeout here. 
+ jint old = Atomic::cmpxchg( 1, &_flush_token, 0 ); + if (old != 0) { + return; } VM_HandleFullCodeCache op(is_full); VMThread::execute(&op); - // rescan again as soon as possible - _rescan = true; + // resweep again as soon as possible + _resweep = true; } void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) { @@ -500,62 +486,64 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) { debug_only(jlong start = os::javaTimeMillis();) - if ((!was_full()) && (is_full)) { - if (!CodeCache::needs_flushing()) { - log_sweep("restart_compiler"); - CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); - return; - } - } - // Traverse the code cache trying to dump the oldest nmethods - uint curr_max_comp_id = CompileBroker::get_compilation_id(); - uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked; + int curr_max_comp_id = CompileBroker::get_compilation_id(); + int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids; + log_sweep("start_cleaning"); nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); jint disconnected = 0; jint made_not_entrant = 0; + jint nmethod_count = 0; + while ((nm != NULL)){ - uint curr_comp_id = nm->compile_id(); + int curr_comp_id = nm->compile_id(); // OSR methods cannot be flushed like this. Also, don't flush native methods // since they are part of the JDK in most cases - if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) && - (!nm->is_native_method()) && ((curr_comp_id < flush_target))) { + if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) { - if ((nm->method()->code() == nm)) { - // This method has not been previously considered for - // unloading or it was restored already - CodeCache::speculatively_disconnect(nm); - disconnected++; - } else if (nm->is_speculatively_disconnected()) { - // This method was previously considered for preemptive unloading and was not called since then - CompilationPolicy::policy()->delay_compilation(nm->method()); - nm->make_not_entrant(); - made_not_entrant++; - } + // only count methods that can be speculatively disconnected + nmethod_count++; - if (curr_comp_id > _highest_marked) { - _highest_marked = curr_comp_id; + if (nm->is_in_use() && (curr_comp_id < flush_target)) { + if ((nm->method()->code() == nm)) { + // This method has not been previously considered for + // unloading or it was restored already + CodeCache::speculatively_disconnect(nm); + disconnected++; + } else if (nm->is_speculatively_disconnected()) { + // This method was previously considered for preemptive unloading and was not called since then + CompilationPolicy::policy()->delay_compilation(nm->method()); + nm->make_not_entrant(); + made_not_entrant++; + } + + if (curr_comp_id > _highest_marked) { + _highest_marked = curr_comp_id; + } } } nm = CodeCache::alive_nmethod(CodeCache::next(nm)); } + // remember how many compile_ids wheren't seen last flush. + _dead_compile_ids = curr_max_comp_id - nmethod_count; + log_sweep("stop_cleaning", "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'", disconnected, made_not_entrant); // Shut off compiler. Sweeper will start over with a new stack scan and // traversal cycle and turn it back on if it clears enough space. 
- if (was_full()) { - _last_was_full = os::javaTimeMillis(); - CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); + if (is_full) { + _last_full_flush_time = os::javaTimeMillis(); } // After two more traversals the sweeper will get rid of unrestored nmethods - _was_full_traversal = _traversals; + _last_flush_traversal_id = _traversals; + _resweep = true; #ifdef ASSERT jlong end = os::javaTimeMillis(); if(PrintMethodFlushing && Verbose) { diff --git a/hotspot/src/share/vm/runtime/sweeper.hpp b/hotspot/src/share/vm/runtime/sweeper.hpp index f6e9bc3091c..ff63029f1cf 100644 --- a/hotspot/src/share/vm/runtime/sweeper.hpp +++ b/hotspot/src/share/vm/runtime/sweeper.hpp @@ -35,26 +35,29 @@ class NMethodSweeper : public AllStatic { static nmethod* _current; // Current nmethod static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache - static volatile int _invocations; // No. of invocations left until we are completed with this pass - static volatile int _sweep_started; // Flag to control conc sweeper + static volatile int _invocations; // No. of invocations left until we are completed with this pass + static volatile int _sweep_started; // Flag to control conc sweeper - static bool _rescan; // Indicates that we should do a full rescan of the - // of the code cache looking for work to do. - static bool _do_sweep; // Flag to skip the conc sweep if no stack scan happened - static int _locked_seen; // Number of locked nmethods encountered during the scan + //The following are reset in scan_stacks and synchronized by the safepoint + static bool _resweep; // Indicates that a change has happend and we want another sweep, + // always checked and reset at a safepoint so memory will be in sync. + static int _locked_seen; // Number of locked nmethods encountered during the scan static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack + static jint _flush_token; // token that guards method flushing, making sure it is executed only once. - static bool _was_full; // remember if we did emergency unloading - static jint _advise_to_sweep; // flag to indicate code cache getting full - static jlong _last_was_full; // timestamp of last emergency unloading - static uint _highest_marked; // highest compile id dumped at last emergency unloading - static long _was_full_traversal; // trav number at last emergency unloading + // These are set during a flush, a VM-operation + static long _last_flush_traversal_id; // trav number at last flush unloading + static jlong _last_full_flush_time; // timestamp of last emergency unloading + + // These are synchronized by the _sweep_started token + static int _highest_marked; // highest compile id dumped at last emergency unloading + static int _dead_compile_ids; // number of compile ids that where not in the cache last flush static void process_nmethod(nmethod *nm); - static void release_nmethod(nmethod* nm); static void log_sweep(const char* msg, const char* format = NULL, ...); + static bool sweep_in_progress(); public: static long traversal_count() { return _traversals; } @@ -71,17 +74,14 @@ class NMethodSweeper : public AllStatic { static void possibly_sweep(); // Compiler threads call this to sweep static void notify(nmethod* nm) { - // Perform a full scan of the code cache from the beginning. No + // Request a new sweep of the code cache from the beginning. 
No // need to synchronize the setting of this flag since it only // changes to false at safepoint so we can never overwrite it with false. - _rescan = true; + _resweep = true; } static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure - - static void set_was_full(bool state) { _was_full = state; } - static bool was_full() { return _was_full; } }; #endif // SHARE_VM_RUNTIME_SWEEPER_HPP diff --git a/hotspot/src/share/vm/runtime/synchronizer.cpp b/hotspot/src/share/vm/runtime/synchronizer.cpp index 6e28286c1a8..ede9affb9f2 100644 --- a/hotspot/src/share/vm/runtime/synchronizer.cpp +++ b/hotspot/src/share/vm/runtime/synchronizer.cpp @@ -1018,7 +1018,8 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) { // We might be able to induce a STW safepoint and scavenge enough // objectMonitors to permit progress. if (temp == NULL) { - vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ; + vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR, + "Allocate ObjectMonitors"); } // Format the block. diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp index cf0297de099..7c3e256e2ef 100644 --- a/hotspot/src/share/vm/runtime/thread.cpp +++ b/hotspot/src/share/vm/runtime/thread.cpp @@ -3447,7 +3447,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { assert (Universe::is_fully_initialized(), "not initialized"); if (VerifyDuringStartup) { - VM_Verify verify_op(false /* silent */); // make sure we're starting with a clean slate + // Make sure we're starting with a clean slate. + VM_Verify verify_op; VMThread::execute(&verify_op); } diff --git a/hotspot/src/share/vm/runtime/thread.hpp b/hotspot/src/share/vm/runtime/thread.hpp index 259a4765cf4..d64ca311c65 100644 --- a/hotspot/src/share/vm/runtime/thread.hpp +++ b/hotspot/src/share/vm/runtime/thread.hpp @@ -1056,11 +1056,11 @@ class JavaThread: public Thread { #if INCLUDE_NMT // native memory tracking inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; } - inline void set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; } + inline void set_recorder(MemRecorder* rc) { _recorder = rc; } private: // per-thread memory recorder - volatile MemRecorder* _recorder; + MemRecorder* volatile _recorder; #endif // INCLUDE_NMT // Suspend/resume support for JavaThread diff --git a/hotspot/src/share/vm/runtime/virtualspace.cpp b/hotspot/src/share/vm/runtime/virtualspace.cpp index dffb588dbae..ba68e887e17 100644 --- a/hotspot/src/share/vm/runtime/virtualspace.cpp +++ b/hotspot/src/share/vm/runtime/virtualspace.cpp @@ -60,72 +60,6 @@ ReservedSpace::ReservedSpace(size_t size, size_t alignment, initialize(size, alignment, large, NULL, 0, executable); } -char * -ReservedSpace::align_reserved_region(char* addr, const size_t len, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) -{ - assert(addr != NULL, "sanity"); - const size_t required_size = prefix_size + suffix_size; - assert(len >= required_size, "len too small"); - - const size_t s = size_t(addr); - const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1); - const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs; - - if (len < beg_delta + required_size) { - return NULL; // Cannot do proper alignment. 
- } - const size_t end_delta = len - (beg_delta + required_size); - - if (beg_delta != 0) { - os::release_memory(addr, beg_delta); - } - - if (end_delta != 0) { - char* release_addr = (char*) (s + beg_delta + required_size); - os::release_memory(release_addr, end_delta); - } - - return (char*) (s + beg_delta); -} - -char* ReservedSpace::reserve_and_align(const size_t reserve_size, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align) -{ - assert(reserve_size > prefix_size + suffix_size, "should not be here"); - - char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align); - if (raw_addr == NULL) return NULL; - - char* result = align_reserved_region(raw_addr, reserve_size, prefix_size, - prefix_align, suffix_size, - suffix_align); - if (result == NULL && !os::release_memory(raw_addr, reserve_size)) { - fatal("os::release_memory failed"); - } - -#ifdef ASSERT - if (result != NULL) { - const size_t raw = size_t(raw_addr); - const size_t res = size_t(result); - assert(res >= raw, "alignment decreased start addr"); - assert(res + prefix_size + suffix_size <= raw + reserve_size, - "alignment increased end addr"); - assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix"); - assert(((res + prefix_size) & (suffix_align - 1)) == 0, - "bad alignment of suffix"); - } -#endif - - return result; -} - // Helper method. static bool failed_to_reserve_as_requested(char* base, char* requested_address, const size_t size, bool special) @@ -155,92 +89,6 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address, return true; } -ReservedSpace::ReservedSpace(const size_t suffix_size, - const size_t suffix_align, - char* requested_address, - const size_t noaccess_prefix) -{ - assert(suffix_size != 0, "sanity"); - assert(suffix_align != 0, "sanity"); - assert((suffix_size & (suffix_align - 1)) == 0, - "suffix_size not divisible by suffix_align"); - - // Assert that if noaccess_prefix is used, it is the same as prefix_align. - // Add in noaccess_prefix to prefix - const size_t adjusted_prefix_size = noaccess_prefix; - const size_t size = adjusted_prefix_size + suffix_size; - - // On systems where the entire region has to be reserved and committed up - // front, the compound alignment normally done by this method is unnecessary. - const bool try_reserve_special = UseLargePages && - suffix_align == os::large_page_size(); - if (!os::can_commit_large_page_memory() && try_reserve_special) { - initialize(size, suffix_align, true, requested_address, noaccess_prefix, - false); - return; - } - - _base = NULL; - _size = 0; - _alignment = 0; - _special = false; - _noaccess_prefix = 0; - _executable = false; - - // Optimistically try to reserve the exact size needed. - char* addr; - if (requested_address != 0) { - requested_address -= noaccess_prefix; // adjust address - assert(requested_address != NULL, "huge noaccess prefix?"); - addr = os::attempt_reserve_memory_at(size, requested_address); - if (failed_to_reserve_as_requested(addr, requested_address, size, false)) { - // OS ignored requested address. Try different address. - addr = NULL; - } - } else { - addr = os::reserve_memory(size, NULL, suffix_align); - } - if (addr == NULL) return; - - // Check whether the result has the needed alignment - const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1); - if (ofs != 0) { - // Wrong alignment. Release, allocate more space and do manual alignment. 
- // - // On most operating systems, another allocation with a somewhat larger size - // will return an address "close to" that of the previous allocation. The - // result is often the same address (if the kernel hands out virtual - // addresses from low to high), or an address that is offset by the increase - // in size. Exploit that to minimize the amount of extra space requested. - if (!os::release_memory(addr, size)) { - fatal("os::release_memory failed"); - } - - const size_t extra = MAX2(ofs, suffix_align - ofs); - addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align, - suffix_size, suffix_align); - if (addr == NULL) { - // Try an even larger region. If this fails, address space is exhausted. - addr = reserve_and_align(size + suffix_align, adjusted_prefix_size, - suffix_align, suffix_size, suffix_align); - } - - if (requested_address != 0 && - failed_to_reserve_as_requested(addr, requested_address, size, false)) { - // As a result of the alignment constraints, the allocated addr differs - // from the requested address. Return back to the caller who can - // take remedial action (like try again without a requested address). - assert(_base == NULL, "should be"); - return; - } - } - - _base = addr; - _size = size; - _alignment = suffix_align; - _noaccess_prefix = noaccess_prefix; -} - void ReservedSpace::initialize(size_t size, size_t alignment, bool large, char* requested_address, const size_t noaccess_prefix, @@ -476,20 +324,6 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, protect_noaccess_prefix(size); } -ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size, - const size_t alignment, - char* requested_address) : - ReservedSpace(heap_space_size, alignment, - requested_address, - (UseCompressedOops && (Universe::narrow_oop_base() != NULL) && - Universe::narrow_oop_use_implicit_null_checks()) ? - lcm(os::vm_page_size(), alignment) : 0) { - if (base() > 0) { - MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); - } - protect_noaccess_prefix(heap_space_size); -} - // Reserve space for code segment. Same as Java heap only we mark this as // executable. ReservedCodeSpace::ReservedCodeSpace(size_t r_size, diff --git a/hotspot/src/share/vm/runtime/virtualspace.hpp b/hotspot/src/share/vm/runtime/virtualspace.hpp index 934efb88a14..0a959e900a0 100644 --- a/hotspot/src/share/vm/runtime/virtualspace.hpp +++ b/hotspot/src/share/vm/runtime/virtualspace.hpp @@ -47,28 +47,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC { const size_t noaccess_prefix, bool executable); - // Release parts of an already-reserved memory region [addr, addr + len) to - // get a new region that has "compound alignment." Return the start of the - // resulting region, or NULL on failure. - // - // The region is logically divided into a prefix and a suffix. The prefix - // starts at the result address, which is aligned to prefix_align. The suffix - // starts at result address + prefix_size, which is aligned to suffix_align. - // The total size of the result region is size prefix_size + suffix_size. - char* align_reserved_region(char* addr, const size_t len, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align); - - // Reserve memory, call align_reserved_region() to alignment it and return the - // result. 
- char* reserve_and_align(const size_t reserve_size, - const size_t prefix_size, - const size_t prefix_align, - const size_t suffix_size, - const size_t suffix_align); - protected: // Create protection page at the beginning of the space. void protect_noaccess_prefix(const size_t size); @@ -79,9 +57,6 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC { ReservedSpace(size_t size, size_t alignment, bool large, char* requested_address = NULL, const size_t noaccess_prefix = 0); - ReservedSpace(const size_t suffix_size, const size_t suffix_align, - char* requested_address, - const size_t noaccess_prefix = 0); ReservedSpace(size_t size, size_t alignment, bool large, bool executable); // Accessors @@ -128,8 +103,6 @@ public: // Constructor ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, char* requested_address); - ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align, - char* requested_address); }; // Class encapsulating behavior specific memory space for Code diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp index b00f6825ddd..b281d7ac39a 100644 --- a/hotspot/src/share/vm/runtime/vmStructs.cpp +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp @@ -77,6 +77,7 @@ #include "oops/klass.hpp" #include "oops/markOop.hpp" #include "oops/methodData.hpp" +#include "oops/methodCounters.hpp" #include "oops/method.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.hpp" @@ -348,16 +349,17 @@ typedef BinaryTreeDictionary